From 5352fc24d9fc50336119d2c199b489f074f5948f Mon Sep 17 00:00:00 2001
From: abobrov <abobrov@localhost>
Date: Fri, 13 Feb 2009 17:03:19 +0000
Subject: [PATCH] - land NDB Backend implementation.

---
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/DatabaseContainer.java                             |   83 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendAddOperation.java      |   60 
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/BackendImpl.java                                   | 2101 +++++++++
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/ExportJob.java                                     |  293 +
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyDNOperation.java |   71 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/package-info.java                           |   36 
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/RootContainer.java                                 |  499 ++
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/Importer.java                           |  643 ++
 opendj-sdk/opends/resource/config/ndbconfig.ldif                                                               |   30 
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/package-info.java                                  |   37 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBBindOperation.java                       |  251 +
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/AbstractTransaction.java                           |  188 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBCompareOperation.java                    |  309 +
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/DNContext.java                          |  363 +
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/WorkThread.java                         |  240 +
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBAddOperation.java                        |  661 ++
 opendj-sdk/opends/src/messages/messages/ndb.properties                                                         |  247 +
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBModifyOperation.java                     |  529 ++
 opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/NdbBackendConfiguration.xml                       |  400 +
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendCompareOperation.java  |   30 
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/NDBException.java                                  |   76 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBSearchOperation.java                     |  437 +
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyOperation.java   |  119 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBModifyDNOperation.java                   |  532 ++
 opendj-sdk/opends/build.xml                                                                                    |  117 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendBindOperation.java     |   66 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendWorkflowElement.java   |    7 
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/EntryContainer.java                                | 2205 +++++++++
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/WorkElement.java                        |  104 
 opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/NdbIndexConfiguration.xml                         |  187 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendSearchOperation.java   |   44 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBWorkflowElement.java                     |  436 +
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/IndexFilter.java                                   |  244 +
 opendj-sdk/opends/src/messages/src/org/opends/messages/Category.java                                           |    7 
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendDeleteOperation.java   |   55 
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/package-info.java                       |   36 
 opendj-sdk/opends/src/server/org/opends/server/backends/ndb/OperationContainer.java                            | 1708 +++++++
 opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBDeleteOperation.java                     |  428 +
 opendj-sdk/opends/resource/schema/02-config.ldif                                                               |   75 
 39 files changed, 13,787 insertions(+), 167 deletions(-)

diff --git a/opendj-sdk/opends/build.xml b/opendj-sdk/opends/build.xml
index 0b72255..078072e 100644
--- a/opendj-sdk/opends/build.xml
+++ b/opendj-sdk/opends/build.xml
@@ -22,7 +22,7 @@
  ! CDDL HEADER END
  !
  !
- !      Copyright 2006-2008 Sun Microsystems, Inc.
+ !      Copyright 2006-2009 Sun Microsystems, Inc.
  ! -->
 
 <project name="Directory Server" basedir="." default="package">
@@ -41,6 +41,7 @@
   <property name="src.dir"          location="src/server"              />
   <property name="build.dir"        location="build"                   />
   <property name="classes.dir"      location="${build.dir}/classes"    />
+  <property name="build.lib.dir"    location="${build.dir}/lib"        />
   <property name="lib.dir"          location="lib"                     />
   <property name="ext.dir"          location="ext"                     />
   <property name="package.dir"      location="${build.dir}/package"    />
@@ -161,6 +162,23 @@
   <property name="snmp.classes.dir"
             location="${classes.dir}/org/opends/server/snmp" />
 
+  <!-- Condition properties for NDB Backend build. -->
+  <condition property="ismysqldirpresent">
+    <available file="${mysql.lib.dir}" type="dir" />
+  </condition>
+  <condition property="exclude.ndb.xml" value=""
+             else="**/Ndb*">
+             <available file="${mysql.lib.dir}" type="dir" />
+  </condition>
+  <condition property="exclude.ndb.src" value=""
+             else="org/opends/server/backends/ndb/**,
+                   org/opends/server/workflowelement/ndb/**">
+             <available file="${mysql.lib.dir}" type="dir" />
+  </condition>
+
+  <!-- Property for excluding NDB Backend config. -->
+  <property name="exclude.ndb.config" value="ndbconfig.ldif" />
+
   <!-- Create a package bundle containing the DSML library. -->
   <target name="dsml" depends="predsml,package"
        description="Build a Directory Server package bundle with DSML.">
@@ -288,8 +306,25 @@
     <genmsg sourceProps="${msg.prop.dir}/servicetag.properties"
             destJava="${msg.javagen.dir}/org/opends/messages/ServiceTagMessages.java">
     </genmsg>
+
+    <antcall target="generatendbmessages" />
+    
   </target>
 
+  <!-- Generate NDB Backend messages if needed -->
+  <target name="generatendbmessages" if="ismysqldirpresent">
+    <typedef name="genmsg"
+             classname="org.opends.build.tools.GenerateMessageFile" >
+      <classpath>
+        <fileset dir="${build.dir}/build-tools">
+          <include name="*.jar" />
+        </fileset>
+      </classpath>
+    </typedef>
+    <genmsg sourceProps="${msg.prop.dir}/ndb.properties"
+            destJava="${msg.javagen.dir}/org/opends/messages/NdbMessages.java">
+    </genmsg>
+  </target>
 
   <!-- Remove all dynamically-generated build files. -->
   <target name="clean" depends="init,cleanadmin,cleanmessages,cleansnmp"
@@ -575,9 +610,13 @@
        depends="init,checkjavaversion,dynamicconstants,generatemessages,compileadmin"
        description="Compile the Directory Server source files.">
     <mkdir dir="${classes.dir}" />
+    <mkdir dir="${build.lib.dir}" />
 
+    <!-- Copy NDB Backend dependencies if necessary -->
+    <antcall target="copyndbdeps" />
+    
     <javac srcdir="${src.dir}:${admin.src.dir}:${msg.src.dir}:${msg.javagen.dir}:${ads.src.dir}:${quicksetup.src.dir}:${guitools.src.dir}"
-         destdir="${classes.dir}" debug="on" debuglevel="${build.debuglevel}"
+         destdir="${classes.dir}" excludes="${exclude.ndb.src}" debug="on" debuglevel="${build.debuglevel}"
          source="1.5" target="1.5" deprecation="true" fork="true"
          memoryInitialSize="${MEM}" memoryMaximumSize="${MEM}">
       <compilerarg value="-Xlint:all" />
@@ -589,6 +628,9 @@
         <fileset dir="${build.dir}/build-tools">
           <include name="build-tools.jar" />
         </fileset>
+        <fileset dir="${build.lib.dir}">
+          <include name="*.jar" />
+        </fileset>
       </classpath>
     </javac>
 
@@ -743,10 +785,13 @@
     <!-- Regenerate configuration files if necessary -->
     <antcall target="compileadmin" />
 
+    <!-- Copy NDB Backend dependencies if necessary -->
+    <antcall target="copyndbdeps" />
+
     <!-- Recreate the classes directory and recompile into it. -->
     <mkdir dir="${classes.dir}" />
     <javac srcdir="${src.dir}:${msg.src.dir}:${msg.javagen.dir}:${admin.src.dir}:${ads.src.dir}:${quicksetup.src.dir}:${guitools.src.dir}"
-         destdir="${classes.dir}"
+         destdir="${classes.dir}" excludes="${exclude.ndb.src}"
          debug="on" debuglevel="${build.debuglevel}" source="1.5" target="1.5"
          deprecation="true" fork="true" memoryInitialSize="${MEM}"
          memoryMaximumSize="${MEM}">
@@ -759,6 +804,9 @@
         <fileset dir="${build.dir}/build-tools">
           <include name="build-tools.jar" />
         </fileset>
+        <fileset dir="${build.lib.dir}">
+          <include name="*.jar" />
+        </fileset>
       </classpath>
     </javac>
 
@@ -830,6 +878,11 @@
     <jar jarfile="${pdir}/lib/quicksetup.jar"
          basedir="${quicksetup.classes.dir}" compress="true" index="true" />
 
+    <!-- Copy over external dependencies. -->
+    <copy todir="${pdir}/lib">
+      <fileset file="${build.lib.dir}/*.jar" />
+    </copy>
+    
     <!-- Regenerate example plugin. -->
     <antcall target="example-plugin" />
   </target>
@@ -965,11 +1018,13 @@
     <fixcrlf srcDir="${scripts.dir}" destDir="${pdir}/lib" includes="_client-script.bat,_server-script.bat,_mixed-script.bat,_script-util.bat,setcp.bat" eol="crlf" />
 
     <copy todir="${pdir}/config">
-      <fileset file="${config.dir}/*" />
+      <fileset file="${config.dir}/*" excludes="${exclude.ndb.config}" />
     </copy>
 
     <antcall target="package-snmp" />
 
+    <antcall target="packagendb" />
+
     <copy file="${pdir}/config/config.ldif"
          tofile="${pdir}/config/upgrade/config.ldif.${REVISION_NUMBER}" />
 
@@ -1272,7 +1327,7 @@
         <dirset dir="${quicksetup.classes.dir}" />
       </classpath>
 
-      <packageset dir="${src.dir}" />
+      <packageset dir="${src.dir}" excludes="${exclude.ndb.src}" />
       <packageset dir="${admin.src.dir}" />
       <packageset dir="${ads.src.dir}" />
       <packageset dir="${dsml.src.dir}" />
@@ -1433,8 +1488,11 @@
     <echo message="Performing partial rebuild (OpenDS zip package found)"/>
     <mkdir dir="${classes.dir}" />
 
+    <!-- Copy NDB Backend dependencies if necessary -->
+    <antcall target="copyndbdeps" />
+
     <javac srcdir="${src.dir}:${admin.src.dir}:${msg.src.dir}:${msg.javagen.dir}:${ads.src.dir}:${quicksetup.src.dir}:${guitools.src.dir}"
-        destdir="${classes.dir}" debug="on" debuglevel="${build.debuglevel}"
+        destdir="${classes.dir}" excludes="${exclude.ndb.src}" debug="on" debuglevel="${build.debuglevel}"
         source="1.5" target="1.5" deprecation="true" fork="true"
         memoryInitialSize="${MEM}" memoryMaximumSize="${MEM}">
       <compilerarg value="-Xlint:all" />
@@ -1446,6 +1504,9 @@
         <fileset dir="${build.dir}/build-tools">
           <include name="build-tools.jar" />
         </fileset>
+        <fileset dir="${build.lib.dir}">
+          <include name="*.jar" />
+        </fileset>
       </classpath>
     </javac>
 
@@ -2146,6 +2207,7 @@
       <arg value="-buildfile" />
       <arg value="${ant.file}" />
       <arg value="-quiet" />
+      <arg value="-Dexclude.ndb.xml=${exclude.ndb.xml}" />
       <arg value="compileadminsubtask" />
       <env key="ANT_OPTS" value="-Xmx${MEM}" />
     </exec>
@@ -2155,7 +2217,8 @@
 
   <target name="compileadminsubtask">
     <!-- Generate introspection API for core administration components. -->
-    <xslt basedir="${admin.defn.dir}" destdir="${admin.src.dir}" includes="**/*Configuration.xml" style="${admin.rules.dir}/metaMO.xsl">
+    <xslt basedir="${admin.defn.dir}" destdir="${admin.src.dir}" includes="**/*Configuration.xml"
+          excludes="${exclude.ndb.xml}" style="${admin.rules.dir}/metaMO.xsl">
       <regexpmapper handledirsep="true" from="^(.*)/([^/]+)Configuration\.xml$$" to="\1/meta/\2CfgDefn.java" />
       <param name="base-dir" expression="${admin.defn.dir}" />
     </xslt>
@@ -2166,7 +2229,8 @@
     </xslt>
 
     <!-- Generate client API for core administration components. -->
-    <xslt basedir="${admin.defn.dir}" destdir="${admin.src.dir}" includes="**/*Configuration.xml" style="${admin.rules.dir}/clientMO.xsl">
+    <xslt basedir="${admin.defn.dir}" destdir="${admin.src.dir}" includes="**/*Configuration.xml"
+          excludes="${exclude.ndb.xml}" style="${admin.rules.dir}/clientMO.xsl">
       <regexpmapper handledirsep="true" from="^(.*)/([^/]+)Configuration\.xml$$" to="\1/client/\2CfgClient.java" />
       <param name="base-dir" expression="${admin.defn.dir}" />
     </xslt>
@@ -2177,7 +2241,8 @@
     </xslt>
 
     <!-- Generate server API for core administration components. -->
-    <xslt basedir="${admin.defn.dir}" destdir="${admin.src.dir}" includes="**/*Configuration.xml" style="${admin.rules.dir}/serverMO.xsl">
+    <xslt basedir="${admin.defn.dir}" destdir="${admin.src.dir}" includes="**/*Configuration.xml"
+          excludes="${exclude.ndb.xml}" style="${admin.rules.dir}/serverMO.xsl">
       <regexpmapper handledirsep="true" from="^(.*)/([^/]+)Configuration\.xml$$" to="\1/server/\2Cfg.java" />
       <param name="base-dir" expression="${admin.defn.dir}" />
     </xslt>
@@ -2189,19 +2254,22 @@
 
     <!-- Generate LDAP profile for core administration components. -->
     <mkdir dir="${classes.dir}" />
-    <xslt basedir="${admin.defn.dir}" destdir="${classes.dir}/admin/profiles/ldap" includes="**/*Configuration.xml" style="${admin.rules.dir}/ldapMOProfile.xsl">
+    <xslt basedir="${admin.defn.dir}" destdir="${classes.dir}/admin/profiles/ldap" includes="**/*Configuration.xml"
+          excludes="${exclude.ndb.xml}" style="${admin.rules.dir}/ldapMOProfile.xsl">
       <regexpmapper handledirsep="true" from="^(.*)/([^/]+)Configuration\.xml$$" to="\1/meta/\2CfgDefn.properties" />
       <param name="base-dir" expression="${admin.defn.dir}" />
     </xslt>
 
     <!-- Generate CLI profile for core administration components. -->
-    <xslt basedir="${admin.defn.dir}" destdir="${classes.dir}/admin/profiles/cli" includes="**/*Configuration.xml" style="${admin.rules.dir}/cliMOProfile.xsl">
+    <xslt basedir="${admin.defn.dir}" destdir="${classes.dir}/admin/profiles/cli" includes="**/*Configuration.xml"
+          excludes="${exclude.ndb.xml}" style="${admin.rules.dir}/cliMOProfile.xsl">
       <regexpmapper handledirsep="true" from="^(.*)/([^/]+)Configuration\.xml$$" to="\1/meta/\2CfgDefn.properties" />
       <param name="base-dir" expression="${admin.defn.dir}" />
     </xslt>
 
     <!-- Generate I18N messages for core administration components. -->
-    <xslt basedir="${admin.defn.dir}" destdir="${classes.dir}/admin/messages" includes="**/*Configuration.xml" style="${admin.rules.dir}/messagesMO.xsl">
+    <xslt basedir="${admin.defn.dir}" destdir="${classes.dir}/admin/messages" includes="**/*Configuration.xml"
+          excludes="${exclude.ndb.xml}" style="${admin.rules.dir}/messagesMO.xsl">
       <regexpmapper handledirsep="true" from="^(.*)/([^/]+)Configuration\.xml$$" to="\1/meta/\2CfgDefn.properties" />
       <param name="base-dir" expression="${admin.defn.dir}" />
     </xslt>
@@ -2209,7 +2277,8 @@
     <!-- Generate manifest file for core administration components. -->
     <tempfile property="admin.temp.dir" destDir="${build.dir}" prefix="tmp" />
     <mkdir dir="${admin.temp.dir}" />
-    <xslt basedir="${admin.defn.dir}" destdir="${admin.temp.dir}" extension=".manifest" includes="**/*Configuration.xml" style="${admin.rules.dir}/manifestMO.xsl"/>
+    <xslt basedir="${admin.defn.dir}" destdir="${admin.temp.dir}" extension=".manifest" includes="**/*Configuration.xml"
+          excludes="${exclude.ndb.xml}" style="${admin.rules.dir}/manifestMO.xsl"/>
     <concat destfile="${classes.dir}/admin/core.manifest">
       <fileset dir="${admin.temp.dir}" includes="**/*.manifest" />
     </concat>
@@ -2437,4 +2506,26 @@
 
   <import file="build-svr4.xml"/>
 
+  <!-- Copy NDB Backend dependencies to build lib directory -->
+  <target name="copyndbdeps" if="ismysqldirpresent"
+    description="Internal target to copy NDB Backend dependencies">
+    <!-- Blanket copy of all jars found at mysql.lib location -->
+    <copy todir="${build.lib.dir}">
+      <fileset file="${mysql.lib.dir}/*.jar" />
+    </copy>
+  </target>
+
+  <!-- Package NDB Backend with Directory Server distribution -->
+  <target name="packagendb" if="ismysqldirpresent"
+    description="Internal target to package NDB Backend dependencies">
+    <echo message="Packaging with NDB Backend dependencies"/>
+    <copy todir="${pdir}/lib">
+      <fileset file="${mysql.lib.dir}/*.jar" />
+    </copy>
+    <!-- Concat NDB Backend config entry to default config -->
+    <concat destfile="${pdir}/config/config.ldif" append="true">
+        <filelist dir="${config.dir}" files="ndbconfig.ldif"/>
+    </concat>
+  </target>
+
 </project>
diff --git a/opendj-sdk/opends/resource/config/ndbconfig.ldif b/opendj-sdk/opends/resource/config/ndbconfig.ldif
new file mode 100644
index 0000000..a18a12d
--- /dev/null
+++ b/opendj-sdk/opends/resource/config/ndbconfig.ldif
@@ -0,0 +1,30 @@
+
+dn: ds-cfg-backend-id=ndbRoot,cn=Backends,cn=config
+objectClass: top
+objectClass: ds-cfg-backend
+objectClass: ds-cfg-ndb-backend
+ds-cfg-enabled: false
+ds-cfg-java-class: org.opends.server.backends.ndb.BackendImpl
+ds-cfg-backend-id: ndbRoot
+ds-cfg-writability-mode: enabled
+ds-cfg-base-dn: dc=example,dc=com
+ds-cfg-ndb-connect-string: localhost
+ds-cfg-sql-connect-string: localhost
+ds-cfg-ndb-dbname: ldap
+ds-cfg-sql-user: root
+ds-cfg-ndb-attr-len: 128
+ds-cfg-ndb-attr-blob: audio
+ds-cfg-ndb-attr-blob: photo
+ds-cfg-ndb-attr-blob: jpegPhoto
+ds-cfg-ndb-attr-blob: personalSignature
+ds-cfg-ndb-attr-blob: userCertificate
+ds-cfg-ndb-attr-blob: caCertificate
+ds-cfg-ndb-attr-blob: authorityRevocationList
+ds-cfg-ndb-attr-blob: certificateRevocationList
+ds-cfg-ndb-attr-blob: crossCertificatePair
+ds-cfg-ndb-attr-blob: userSMIMECertificate
+ds-cfg-ndb-attr-blob: userPKCS12
+ds-cfg-ndb-thread-count: 24
+ds-cfg-ndb-num-connections: 4
+ds-cfg-deadlock-retry-limit: 10
+
diff --git a/opendj-sdk/opends/resource/schema/02-config.ldif b/opendj-sdk/opends/resource/schema/02-config.ldif
index 3064dde..5bc49bb 100644
--- a/opendj-sdk/opends/resource/schema/02-config.ldif
+++ b/opendj-sdk/opends/resource/schema/02-config.ldif
@@ -1095,6 +1095,11 @@
   SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
   SINGLE-VALUE
   X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.225
+  NAME 'ds-cfg-deadlock-retry-limit'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
 attributeTypes: ( 1.3.6.1.4.1.26027.1.1.226
   NAME 'ds-cfg-db-evictor-lru-only'
   SYNTAX 1.3.6.1.4.1.1466.115.121.1.7
@@ -2323,6 +2328,50 @@
   NAME 'ds-cfg-collation'
   SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
   X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.501
+  NAME 'ds-cfg-ndb-connect-string'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.502
+  NAME 'ds-cfg-ndb-thread-count'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.503
+  NAME 'ds-cfg-ndb-num-connections'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.504
+  NAME 'ds-cfg-sql-user'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.505
+  NAME 'ds-cfg-sql-passwd'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.506
+  NAME 'ds-cfg-ndb-dbname'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.507
+  NAME 'ds-cfg-ndb-attr-len'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.508
+  NAME 'ds-cfg-ndb-attr-blob'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+  X-ORIGIN 'OpenDS Directory Server' )
+attributeTypes: ( 1.3.6.1.4.1.26027.1.1.509
+  NAME 'ds-cfg-sql-connect-string'
+  SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
+  SINGLE-VALUE
+  X-ORIGIN 'OpenDS Directory Server' )
 attributeTypes: ( 1.3.6.1.4.1.26027.1.1.511
   NAME 'ds-cfg-quality-of-protection'
   SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
@@ -3943,6 +3992,32 @@
   MUST ( ds-cfg-matching-rule-type $
         ds-cfg-collation )
   X-ORIGIN 'OpenDS Directory Server' )
+objectClasses: ( 1.3.6.1.4.1.26027.1.2.196
+  NAME 'ds-cfg-ndb-backend'
+  SUP ds-cfg-backend
+  STRUCTURAL
+  MUST ( ds-cfg-ndb-connect-string $
+         ds-cfg-sql-connect-string $
+         ds-cfg-ndb-dbname )
+  MAY ( ds-cfg-sql-user $
+        ds-cfg-sql-passwd $
+        ds-cfg-ndb-attr-len $
+        ds-cfg-ndb-attr-blob $
+        ds-cfg-ndb-thread-count $
+        ds-cfg-ndb-num-connections $
+        ds-cfg-deadlock-retry-limit $
+        ds-cfg-import-queue-size $
+        ds-cfg-import-thread-count )
+  X-ORIGIN 'OpenDS Directory Server' )
+objectClasses: ( 1.3.6.1.4.1.26027.1.2.197
+  NAME 'ds-cfg-ndb-index'
+  SUP top
+  STRUCTURAL
+  MUST ( ds-cfg-attribute $
+         ds-cfg-index-type )
+  MAY ( ds-cfg-index-entry-limit $
+        ds-cfg-substring-length )
+  X-ORIGIN 'OpenDS Directory Server' )
 objectClasses: ( 1.3.6.1.4.1.26027.1.2.950
   NAME 'ds-mon-branch'
   SUP top
diff --git a/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/NdbBackendConfiguration.xml b/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/NdbBackendConfiguration.xml
new file mode 100644
index 0000000..d82893e
--- /dev/null
+++ b/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/NdbBackendConfiguration.xml
@@ -0,0 +1,400 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ! CDDL HEADER START
+  !
+  ! The contents of this file are subject to the terms of the
+  ! Common Development and Distribution License, Version 1.0 only
+  ! (the "License").  You may not use this file except in compliance
+  ! with the License.
+  !
+  ! You can obtain a copy of the license at
+  ! trunk/opends/resource/legal-notices/OpenDS.LICENSE
+  ! or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+  ! See the License for the specific language governing permissions
+  ! and limitations under the License.
+  !
+  ! When distributing Covered Code, include this CDDL HEADER in each
+  ! file and include the License file at
+  ! trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+  ! add the following below this CDDL HEADER, with the fields enclosed
+  ! by brackets "[]" replaced with your own identifying information:
+  !      Portions Copyright [yyyy] [name of copyright owner]
+  !
+  ! CDDL HEADER END
+  !
+  !
+  !      Copyright 2008-2009 Sun Microsystems, Inc.
+  ! -->
+<adm:managed-object name="ndb-backend"
+  plural-name="ndb-backends" package="org.opends.server.admin.std"
+  extends="backend" xmlns:adm="http://www.opends.org/admin"
+  xmlns:ldap="http://www.opends.org/admin-ldap"
+  xmlns:cli="http://www.opends.org/admin-cli">
+  <adm:synopsis>
+    The
+    <adm:user-friendly-name />
+    uses the NDB to store user-provided data.
+  </adm:synopsis>
+  <adm:description>
+    The
+    <adm:user-friendly-name />
+    stores the entries in NDB Cluster using shared data model
+    which allows for simultaneous LDAP/SQL datastore access.
+  </adm:description>
+  <adm:profile name="ldap">
+    <ldap:object-class>
+      <ldap:name>ds-cfg-ndb-backend</ldap:name>
+      <ldap:superior>ds-cfg-backend</ldap:superior>
+    </ldap:object-class>
+  </adm:profile>
+  <adm:relation name="ndb-index">
+    <adm:one-to-many naming-property="attribute">
+      <adm:default-managed-object name="aci">
+        <adm:property name="index-type">
+          <adm:value>presence</adm:value>
+        </adm:property>
+        <adm:property name="attribute">
+          <adm:value>aci</adm:value>
+        </adm:property>
+      </adm:default-managed-object>
+      <adm:default-managed-object name="entryUUID">
+        <adm:property name="index-type">
+          <adm:value>equality</adm:value>
+        </adm:property>
+        <adm:property name="attribute">
+          <adm:value>entryUUID</adm:value>
+        </adm:property>
+      </adm:default-managed-object>
+      <adm:default-managed-object name="objectClass">
+        <adm:property name="index-type">
+          <adm:value>equality</adm:value>
+        </adm:property>
+        <adm:property name="attribute">
+          <adm:value>objectClass</adm:value>
+        </adm:property>
+      </adm:default-managed-object>
+      <adm:default-managed-object name="ds-sync-hist">
+        <adm:property name="index-type">
+          <adm:value>ordering</adm:value>
+        </adm:property>
+        <adm:property name="attribute">
+          <adm:value>ds-sync-hist</adm:value>
+        </adm:property>
+      </adm:default-managed-object>
+    </adm:one-to-many>
+    <adm:profile name="ldap">
+      <ldap:rdn-sequence>cn=Index</ldap:rdn-sequence>
+    </adm:profile>
+    <adm:profile name="cli">
+      <cli:relation>
+        <cli:default-property name="index-type" />
+      </cli:relation>
+    </adm:profile>
+  </adm:relation>
+  <adm:property-override name="java-class" advanced="true">
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>
+          org.opends.server.backends.ndb.BackendImpl
+        </adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+  </adm:property-override>
+  <adm:property name="ndb-connect-string" mandatory="true">
+    <adm:synopsis>
+      Specifies the NDB connect string.
+    </adm:synopsis>
+    <adm:description>
+      IP addresses or hostnames with port numbers may be provided.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>localhost</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-ndb-connect-string</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="sql-connect-string" mandatory="true">
+    <adm:synopsis>
+      Specifies the SQL connect string.
+    </adm:synopsis>
+    <adm:description>
+      IP addresses or hostnames with port numbers may be provided.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>localhost</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-sql-connect-string</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="sql-user">
+    <adm:synopsis>
+      Specifies the SQL database user on whose behalf
+      the connection is being made.
+    </adm:synopsis>
+    <adm:description>
+      SQL user name may be provided.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>root</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-sql-user</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="sql-passwd">
+    <adm:synopsis>
+      Specifies the SQL database user password.
+    </adm:synopsis>
+    <adm:description>
+      SQL user password may be provided.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:undefined />
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-sql-passwd</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="ndb-dbname" mandatory="true">
+    <adm:synopsis>
+      Specifies the SQL/NDB database name.
+    </adm:synopsis>
+    <adm:description>
+      SQL/NDB database name may be provided.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>ldap</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-ndb-dbname</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="ndb-num-connections" advanced="true">
+    <adm:synopsis>
+      Specifies the number of NDB connections.
+    </adm:synopsis>
+    <adm:description>
+      Logical connections made to NDB Cluster.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>4</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-ndb-num-connections</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="ndb-thread-count" advanced="true">
+    <adm:synopsis>
+      Specifies the number of threads that are used for concurrent
+      NDB processing.
+    </adm:synopsis>
+    <adm:description>
+      This should generally be equal to the number of worker threads.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart/>
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>24</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" upper-limit="128" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-ndb-thread-count</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="ndb-attr-len" advanced="true">
+    <adm:synopsis>
+      Specifies the attribute length.
+    </adm:synopsis>
+    <adm:description>
+      This should reflect SQL/NDB attribute column length.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart/>
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>128</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-ndb-attr-len</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="ndb-attr-blob" multi-valued="true" advanced="true">
+    <adm:synopsis>
+      Specifies the blob attribute.
+    </adm:synopsis>
+    <adm:description>
+      This should specify which attribute to treat as a blob.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:undefined />
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-ndb-attr-blob</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="deadlock-retry-limit" advanced="true">
+    <adm:synopsis>
+      Specifies the number of times that the server should retry an
+      attempted operation in the backend if a deadlock results from
+      two concurrent requests that interfere with each other in a
+      conflicting manner.
+    </adm:synopsis>
+    <adm:description>
+      A value of "0" indicates no limit.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>10</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="0" upper-limit="2147483647" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-deadlock-retry-limit</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="import-queue-size" advanced="true">
+    <adm:synopsis>
+      Specifies the size (in number of entries) of the queue that is
+      used to hold the entries read during an LDIF import.
+    </adm:synopsis>
+    <adm:requires-admin-action>
+      <adm:none>
+        <adm:synopsis>
+          Changes do not take effect for any import that may already
+          be in progress.
+        </adm:synopsis>
+      </adm:none>
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>100</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" upper-limit="2147483647" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-import-queue-size</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="import-thread-count" advanced="true">
+    <adm:synopsis>
+      Specifies the number of threads that is used for concurrent
+      processing during an LDIF import.
+    </adm:synopsis>
+    <adm:description>
+      This should generally be a small multiple (for example, 2x) of the number
+      of CPUs in the system for a traditional system, or equal to the
+      number of CPU strands for a CMT system.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:none>
+        <adm:synopsis>
+          Changes do not take effect for any import that may already
+          be in progress.
+        </adm:synopsis>
+      </adm:none>
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>8</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" upper-limit="2147483647" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-import-thread-count</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+</adm:managed-object>
diff --git a/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/NdbIndexConfiguration.xml b/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/NdbIndexConfiguration.xml
new file mode 100644
index 0000000..859bf5d
--- /dev/null
+++ b/opendj-sdk/opends/src/admin/defn/org/opends/server/admin/std/NdbIndexConfiguration.xml
@@ -0,0 +1,187 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ! CDDL HEADER START
+  !
+  ! The contents of this file are subject to the terms of the
+  ! Common Development and Distribution License, Version 1.0 only
+  ! (the "License").  You may not use this file except in compliance
+  ! with the License.
+  !
+  ! You can obtain a copy of the license at
+  ! trunk/opends/resource/legal-notices/OpenDS.LICENSE
+  ! or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+  ! See the License for the specific language governing permissions
+  ! and limitations under the License.
+  !
+  ! When distributing Covered Code, include this CDDL HEADER in each
+  ! file and include the License file at
+  ! trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+  ! add the following below this CDDL HEADER, with the fields enclosed
+  ! by brackets "[]" replaced with your own identifying information:
+  !      Portions Copyright [yyyy] [name of copyright owner]
+  !
+  ! CDDL HEADER END
+  !
+  !
+  !      Copyright 2008-2009 Sun Microsystems, Inc.
+  ! -->
+<adm:managed-object name="ndb-index" plural-name="ndb-indexes"
+  package="org.opends.server.admin.std"
+  xmlns:adm="http://www.opends.org/admin"
+  xmlns:ldap="http://www.opends.org/admin-ldap">
+  <adm:synopsis>
+    <adm:user-friendly-plural-name />
+    are used to store information that makes it possible to locate
+    entries very quickly when processing search operations.
+  </adm:synopsis>
+  <adm:description>
+    Indexing is performed on a per-attribute level and different types
+    of indexing may be performed for different kinds of attributes, based
+    on how they are expected to be accessed during search operations.
+  </adm:description>
+  <adm:tag name="database" />
+  <adm:profile name="ldap">
+    <ldap:object-class>
+      <ldap:name>ds-cfg-ndb-index</ldap:name>
+      <ldap:superior>top</ldap:superior>
+    </ldap:object-class>
+  </adm:profile>
+  <adm:property name="attribute" mandatory="true" read-only="true">
+    <adm:synopsis>
+      Specifies the name of the attribute for which the index is to
+      be maintained.
+    </adm:synopsis>
+    <adm:syntax>
+      <adm:attribute-type />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-attribute</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="index-entry-limit">
+    <adm:synopsis>
+      Specifies the maximum number of entries that are allowed
+      to match a given index key before that particular index key is no
+      longer maintained.
+    </adm:synopsis>
+    <adm:description>
+      This is analogous to the ALL IDs threshold in the Sun Java System
+      Directory Server. If this is specified, its value overrides any
+      backend-wide configuration. For no limit, use 0 for the value.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:other>
+        <adm:synopsis>
+          If any index keys have already reached this limit, indexes
+          must be rebuilt before they will be allowed to use the
+          new limit.
+        </adm:synopsis>
+      </adm:other>
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:inherited>
+        <adm:relative property-name="index-entry-limit" offset="1"
+          managed-object-name="ndb-backend" />
+      </adm:inherited>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="0" upper-limit="2147483647">
+        <adm:unit-synopsis>Number of entries</adm:unit-synopsis>
+      </adm:integer>
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-index-entry-limit</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="index-type" mandatory="true"
+    multi-valued="true">
+    <adm:synopsis>
+      Specifies the type(s) of indexing that should be performed
+      for the associated attribute.
+    </adm:synopsis>
+    <adm:description>
+      For equality, presence, and substring index types, the associated
+      attribute type must have a corresponding matching rule.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:other>
+        <adm:synopsis>
+          If any new index types are added for an attribute, and 
+          values for that attribute already exist in the
+          database, the index must be rebuilt before it
+          will be accurate.
+        </adm:synopsis>
+      </adm:other>
+    </adm:requires-admin-action>
+    <adm:syntax>
+      <adm:enumeration>
+        <adm:value name="equality">
+          <adm:synopsis>
+            This index type is used to improve the efficiency
+            of searches using equality search filters.
+          </adm:synopsis>
+        </adm:value>
+        <adm:value name="ordering">
+          <adm:synopsis>
+            This index type is used to improve the efficiency
+            of searches using "greater than or equal to" or "less than
+            or equal to" search filters.
+          </adm:synopsis>
+        </adm:value>
+        <adm:value name="presence">
+          <adm:synopsis>
+            This index type is used to improve the efficiency
+            of searches using presence search filters.
+          </adm:synopsis>
+        </adm:value>
+        <adm:value name="substring">
+          <adm:synopsis>
+            This index type is used to improve the efficiency
+            of searches using substring search filters.
+          </adm:synopsis>
+        </adm:value>
+        <adm:value name="approximate">
+          <adm:synopsis>
+            This index type is used to improve the efficiency
+            of searches using approximate matching search filters.
+          </adm:synopsis>
+        </adm:value>
+      </adm:enumeration>
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-index-type</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="substring-length" advanced="true">
+    <adm:synopsis>
+      The length of substrings in a substring index.
+    </adm:synopsis>
+    <adm:requires-admin-action>
+      <adm:other>
+        <adm:synopsis>
+          The index must be rebuilt before it will reflect the
+          new value.
+        </adm:synopsis>
+      </adm:other>
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>6</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="3" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-substring-length</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+</adm:managed-object>
diff --git a/opendj-sdk/opends/src/messages/messages/ndb.properties b/opendj-sdk/opends/src/messages/messages/ndb.properties
new file mode 100644
index 0000000..036d557
--- /dev/null
+++ b/opendj-sdk/opends/src/messages/messages/ndb.properties
@@ -0,0 +1,247 @@
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License, Version 1.0 only
+# (the "License").  You may not use this file except in compliance
+# with the License.
+#
+# You can obtain a copy of the license at
+# trunk/opends/resource/legal-notices/OpenDS.LICENSE
+# or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at
+# trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+# add the following below this CDDL HEADER, with the fields enclosed
+# by brackets "[]" replaced with your own identifying information:
+#      Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+#      Copyright 2008-2009 Sun Microsystems, Inc.
+
+
+
+#
+# Global directives
+#
+global.category=NDB
+
+#
+# Format string definitions
+#
+# Keys must be formatted as follows:
+#
+# [SEVERITY]_[DESCRIPTION]_[ORDINAL]
+#
+# where:
+#
+# SEVERITY is one of:
+# [INFO, MILD_WARN, SEVERE_WARN, MILD_ERR, SEVERE_ERR, FATAL_ERR, DEBUG, NOTICE]
+#
+# DESCRIPTION is an upper case string providing a hint as to the context of
+# the message in upper case with the underscore ('_') character serving as
+# word separator
+#
+# ORDINAL is an integer unique among other ordinals in this file
+#
+MILD_ERR_NDB_INCORRECT_ROUTING_1=The backend does not contain that part of \
+ the Directory Information Tree pertaining to the entry '%s'
+SEVERE_ERR_NDB_OPEN_DATABASE_FAIL_2=The database could not be opened: %s
+SEVERE_ERR_NDB_OPEN_ENV_FAIL_3=The database environment could not be opened: \
+ %s
+SEVERE_ERR_NDB_HIGHEST_ID_FAIL_5=The database highest entry identifier could \
+ not be determined
+SEVERE_WARN_NDB_FUNCTION_NOT_SUPPORTED_6=The requested operation is not \
+ supported by this backend
+SEVERE_ERR_NDB_MISSING_DN2ID_RECORD_10=The DN database does not contain a \
+ record for '%s'
+SEVERE_ERR_NDB_MISSING_ID2ENTRY_RECORD_11=The entry database does not contain \
+ a record for ID %s
+SEVERE_ERR_NDB_ENTRY_DATABASE_CORRUPT_12=The entry database does not contain \
+ a valid record for ID %s
+SEVERE_ERR_NDB_DATABASE_EXCEPTION_14=Database exception: %s
+SEVERE_ERR_CONFIG_INDEX_TYPE_NEEDS_MATCHING_RULE_26=The attribute '%s' cannot \
+ have indexing of type '%s' because it does not have a corresponding matching \
+ rule
+MILD_ERR_NDB_UNCHECKED_EXCEPTION_28=Unchecked exception during database \
+ transaction
+NOTICE_NDB_SUBTREE_DELETE_SIZE_LIMIT_EXCEEDED_32=Exceeded the administrative \
+ limit on the number of entries that may be deleted in a subtree delete \
+ operation. The number of entries actually deleted was %d. The operation may \
+ be retried until all entries in the subtree have been deleted
+NOTICE_NDB_DELETED_ENTRY_COUNT_33=The number of entries deleted was %d
+MILD_ERR_NDB_DUPLICATE_CONFIG_ENTRY_36=The configuration entry '%s' will be \
+ ignored. Only one configuration entry with object class '%s' is allowed
+MILD_ERR_NDB_CONFIG_ENTRY_NOT_RECOGNIZED_37=The configuration entry '%s' will \
+ be ignored because it is not recognized
+MILD_ERR_NDB_INDEX_ATTRIBUTE_TYPE_NOT_FOUND_38=The index configuration entry \
+ '%s' will be ignored because it specifies an unknown attribute type '%s'
+MILD_ERR_NDB_DUPLICATE_INDEX_CONFIG_39=The index configuration entry '%s' \
+ will be ignored because it specifies the attribute type '%s', which has \
+ already been defined in another index configuration entry
+SEVERE_ERR_NDB_IO_ERROR_40=I/O error during backend operation: %s
+NOTICE_NDB_BACKEND_STARTED_42=The database backend %s containing %d entries \
+ has started
+MILD_ERR_NDB_IMPORT_PARENT_NOT_FOUND_43=The parent entry '%s' does not exist
+SEVERE_WARN_NDB_IMPORT_ENTRY_EXISTS_44=The entry exists and the import \
+ options do not allow it to be replaced
+MILD_ERR_NDB_ATTRIBUTE_INDEX_NOT_CONFIGURED_45=There is no index configured \
+ for attribute type '%s'
+MILD_ERR_NDB_SEARCH_NO_SUCH_OBJECT_46=The search base entry '%s' does not \
+ exist
+MILD_ERR_NDB_ADD_NO_SUCH_OBJECT_47=The entry '%s' cannot be added because its \
+ parent entry does not exist
+MILD_ERR_NDB_DELETE_NO_SUCH_OBJECT_48=The entry '%s' cannot be removed \
+ because it does not exist
+MILD_ERR_NDB_MODIFY_NO_SUCH_OBJECT_49=The entry '%s' cannot be modified \
+ because it does not exist
+MILD_ERR_NDB_MODIFYDN_NO_SUCH_OBJECT_50=The entry '%s' cannot be renamed \
+ because it does not exist
+MILD_ERR_NDB_ADD_ENTRY_ALREADY_EXISTS_51=The entry '%s' cannot be added \
+ because an entry with that name already exists
+MILD_ERR_NDB_DELETE_NOT_ALLOWED_ON_NONLEAF_52=The entry '%s' cannot be \
+ removed because it has subordinate entries
+MILD_ERR_NDB_MODIFYDN_ALREADY_EXISTS_53=The entry cannot be renamed to '%s' \
+ because an entry with that name already exists
+MILD_ERR_NDB_NEW_SUPERIOR_NO_SUCH_OBJECT_54=The entry cannot be moved because \
+ the new parent entry '%s' does not exist
+NOTICE_NDB_EXPORT_FINAL_STATUS_87=Exported %d entries and skipped %d in %d \
+ seconds (average rate %.1f/sec)
+NOTICE_NDB_EXPORT_PROGRESS_REPORT_88=Exported %d records and skipped %d (recent \
+ rate %.1f/sec)
+NOTICE_NDB_IMPORT_THREAD_COUNT_89=Import Thread Count: %d threads
+INFO_NDB_IMPORT_BUFFER_SIZE_90=Buffer size per thread = %,d
+INFO_NDB_IMPORT_LDIF_PROCESSING_TIME_91=LDIF processing took %d seconds
+INFO_NDB_IMPORT_INDEX_PROCESSING_TIME_92=Index processing took %d seconds
+NOTICE_NDB_WAITING_FOR_CLUSTER_93=Waiting for NDB Cluster to become \
+ available
+NOTICE_NDB_IMPORT_FINAL_STATUS_94=Processed %d entries, imported %d, skipped \
+ %d, rejected %d and migrated %d in %d seconds (average rate %.1f/sec)
+NOTICE_NDB_IMPORT_ENTRY_LIMIT_EXCEEDED_COUNT_95=Number of index values that \
+ exceeded the entry limit: %d
+NOTICE_NDB_IMPORT_PROGRESS_REPORT_96=Processed %d entries, skipped %d, rejected \
+ %d, and migrated %d (recent rate %.1f/sec)
+NOTICE_NDB_VERIFY_CLEAN_FINAL_STATUS_101=Checked %d records and found %d \
+ error(s) in %d seconds (average rate %.1f/sec)
+INFO_NDB_VERIFY_MULTIPLE_REFERENCE_COUNT_102=Number of records referencing \
+ more than one entry: %d
+INFO_NDB_VERIFY_ENTRY_LIMIT_EXCEEDED_COUNT_103=Number of records that exceed \
+ the entry limit: %d
+INFO_NDB_VERIFY_AVERAGE_REFERENCE_COUNT_104=Average number of entries \
+ referenced is %.2f/record
+INFO_NDB_VERIFY_MAX_REFERENCE_COUNT_105=Maximum number of entries referenced \
+ by any record is %d
+NOTICE_NDB_BOOTSTRAP_SCHEMA_106=Processing LDAP schema and NDB tables, this \
+ may take a while
+INFO_NDB_VERIFY_ENTRY_LIMIT_STATS_HEADER_107=Statistics for records that have \
+ exceeded the entry limit:
+NOTICE_NDB_VERIFY_PROGRESS_REPORT_109=Processed %d out of %d records and found \
+ %d error(s) (recent rate %.1f/sec)
+MILD_ERR_NDB_INVALID_PAGED_RESULTS_COOKIE_111=The following paged results \
+ control cookie value was not recognized: %s
+NOTICE_NDB_REFERRAL_RESULT_MESSAGE_112=A referral entry %s indicates that the \
+ operation must be processed at a different server
+SEVERE_ERR_NDB_INCOMPATIBLE_ENTRY_VERSION_126=Entry record with ID %s is not \
+ compatible with this version of the backend database. Entry version: %x
+NOTICE_NDB_LOOKTHROUGH_LIMIT_EXCEEDED_127=This search operation has checked the \
+ maximum of %d entries for matches
+SEVERE_WARN_NDB_GET_ENTRY_COUNT_FAILED_129=Unable to determine the total \
+ number of entries in the container: %s
+NOTICE_NDB_CONFIG_ATTR_REQUIRES_RESTART_130=The change to the %s attribute will \
+ not take effect until the backend is restarted
+NOTICE_NDB_REBUILD_PROGRESS_REPORT_131=%.1f%% Completed. Processed %d/%d \
+ records. (recent rate %.1f/sec)
+NOTICE_NDB_REBUILD_FINAL_STATUS_133=Rebuild complete. Processed %d records in \
+ %d seconds (average rate %.1f/sec)
+SEVERE_ERR_NDB_REBUILD_INDEX_FAILED_134=An error occurred while rebuilding \
+ index %s: %s
+MILD_ERR_NDB_REBUILD_INSERT_ENTRY_FAILED_135=An error occurred while \
+ inserting entry into the %s database/index: %s
+SEVERE_ERR_NDB_REBUILD_INDEX_CONFLICT_136=Another rebuild of index %s is \
+ already in progress
+NOTICE_NDB_REBUILD_START_137=Rebuild of index(es) %s started with %d total \
+ records to process
+SEVERE_ERR_NDB_REBUILD_BACKEND_ONLINE_138=Rebuilding system index(es) must be \
+ done with the backend containing the base DN disabled
+SEVERE_ERR_ENTRYIDSORTER_CANNOT_EXAMINE_ENTRY_139=Unable to examine the entry \
+ with ID %s for sorting purposes:  %s
+MILD_ERR_NDB_SEARCH_CANNOT_SORT_UNINDEXED_140=The search results cannot be \
+ sorted because the given search request is not indexed
+MILD_ERR_ENTRYIDSORTER_NEGATIVE_START_POS_141=Unable to process the virtual \
+ list view request because the target start position was before the beginning \
+ of the result set
+MILD_ERR_ENTRYIDSORTER_OFFSET_TOO_LARGE_142=Unable to process the virtual \
+ list view request because the target offset %d was greater than the total \
+ number of results in the list (%d)
+MILD_ERR_ENTRYIDSORTER_TARGET_VALUE_NOT_FOUND_143=Unable to process the \
+ virtual list view request because no entry was found in the result set with a \
+ sort value greater than or equal to the provided assertion value
+MILD_ERR_NDB_SEARCH_CANNOT_MIX_PAGEDRESULTS_AND_VLV_144=The requested search \
+ operation included both the simple paged results control and the virtual list \
+ view control.  These controls are mutually exclusive and cannot be used \
+ together
+MILD_ERR_NDB_SEARCH_UNINDEXED_INSUFFICIENT_PRIVILEGES_145=You do not have \
+ sufficient privileges to perform an unindexed search
+NOTICE_NDB_CONFIG_INDEX_ENTRY_LIMIT_REQUIRES_REBUILD_148=Some index keys have \
+ already exceeded the previous index entry limit in index %s. This index must \
+ be rebuilt before it can use the new limit
+NOTICE_NDB_INDEX_ADD_REQUIRES_REBUILD_150=Due to changes in the \
+ configuration, index %s is currently operating in a degraded state and must \
+ be rebuilt before it can be used
+SEVERE_ERR_NDB_INDEX_CORRUPT_REQUIRES_REBUILD_151=An error occurred while \
+ reading from index %s. The index seems to be corrupt and is now operating in \
+ a degraded state. The index must be rebuilt before it can return to normal \
+ operation
+SEVERE_ERR_NDB_IMPORT_BACKEND_ONLINE_152=The backend must be disabled before \
+ the import process can start
+SEVERE_ERR_NDB_IMPORT_THREAD_EXCEPTION_153=An error occurred in import thread \
+ %s: %s. The thread cannot continue
+SEVERE_ERR_NDB_IMPORT_NO_WORKER_THREADS_154=There are no more import worker \
+ threads to process the imported entries
+SEVERE_ERR_NDB_IMPORT_CREATE_TMPDIR_ERROR_155=Unable to create the temporary \
+ directory %s
+NOTICE_NDB_IMPORT_MIGRATION_START_157=Migrating %s entries for base DN %s
+NOTICE_NDB_IMPORT_LDIF_START_158=Processing LDIF
+NOTICE_NDB_IMPORT_LDIF_END_159=End of LDIF reached
+SEVERE_ERR_NDB_CONFIG_VLV_INDEX_UNDEFINED_ATTR_160=Sort attribute %s for VLV \
+ index %s is not defined in the server schema
+SEVERE_ERR_NDB_CONFIG_VLV_INDEX_BAD_FILTER_161=An error occurred while parsing \
+ the search filter %s defined for VLV index %s: %s
+MILD_ERR_NDB_VLV_INDEX_NOT_CONFIGURED_162=There is no VLV index configured \
+ with name '%s'
+MILD_ERR_NDB_MODIFYDN_ABORTED_BY_SUBORDINATE_PLUGIN_163=A plugin caused the \
+ modify DN operation to be aborted while moving and/or renaming an entry from \
+ %s to %s
+MILD_ERR_NDB_MODIFYDN_ABORTED_BY_SUBORDINATE_SCHEMA_ERROR_164=A plugin caused \
+ the modify DN operation to be aborted while moving and/or renaming an entry \
+ from %s to %s because the change to that entry violated the server schema \
+ configuration:  %s
+SEVERE_ERR_NDB_COMPSCHEMA_CANNOT_STORE_STATUS_167=An error occurred while \
+ attempting to store compressed schema information in the database.  The \
+ result returned was:  %s
+SEVERE_ERR_NDB_COMPSCHEMA_CANNOT_STORE_EX_168=An error occurred while \
+ attempting to store compressed schema information in the database:  %s
+SEVERE_ERR_NDB_COMPSCHEMA_CANNOT_STORE_MULTIPLE_FAILURES_169=The server was \
+ unable to store compressed schema information in the database after multiple \
+ attempts
+SEVERE_ERR_NDB_COMPSCHEMA_UNKNOWN_OC_TOKEN_170=Unable to decode the provided \
+ object class set because it used an undefined token %s
+SEVERE_ERR_NDB_COMPSCHEMA_UNRECOGNIZED_AD_TOKEN_171=Unable to decode the \
+ provided attribute because it used an undefined attribute description token \
+ %s
+NOTICE_NDB_IMPORT_STARTING_173=%s starting import (build %s, R%d)
+SEVERE_ERR_NDB_IMPORT_LDIF_ABORT_175=The import was aborted because an \
+  uncaught exception was thrown during processing
+NOTICE_NDB_IMPORT_LDIF_BUFFER_TOT_AVAILMEM_184=Available buffer memory %d bytes is \
+  below the minimum value of %d bytes. Setting available buffer memory to \
+  the minimum
+NOTICE_NDB_IMPORT_LDIF_BUFFER_CONTEXT_AVAILMEM_186=Available buffer memory %d \
+  bytes is below the minimum value of %d bytes allowed for a single import \
+  context. Setting context available buffer memory to the minimum
+SEVERE_ERR_NDB_IMPORT_OFFLINE_NOT_SUPPORTED_191=Offline import is currently \
+ not supported by this backend
+SEVERE_ERR_NDB_EXPORT_OFFLINE_NOT_SUPPORTED_192=Offline export is currently \
+ not supported by this backend
diff --git a/opendj-sdk/opends/src/messages/src/org/opends/messages/Category.java b/opendj-sdk/opends/src/messages/src/org/opends/messages/Category.java
index d1c059e..23427f4 100644
--- a/opendj-sdk/opends/src/messages/src/org/opends/messages/Category.java
+++ b/opendj-sdk/opends/src/messages/src/org/opends/messages/Category.java
@@ -22,7 +22,7 @@
  * CDDL HEADER END
  *
  *
- *      Copyright 2007-2008 Sun Microsystems, Inc.
+ *      Copyright 2007-2009 Sun Microsystems, Inc.
  */
 
 package org.opends.messages;
@@ -162,6 +162,11 @@
   SERVICETAG(0x01400000),
 
   /**
+   * The category used for messages associated with the NDB backend.
+   */
+  NDB(0x01500000),
+
+  /**
    * The category that will be used for messages associated with
    * third-party (including user-defined) modules.
    */
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/AbstractTransaction.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/AbstractTransaction.java
new file mode 100644
index 0000000..0ede8a1
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/AbstractTransaction.java
@@ -0,0 +1,202 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+
+import com.mysql.cluster.ndbj.Ndb;
+import com.mysql.cluster.ndbj.NdbApiException;
+import com.mysql.cluster.ndbj.NdbOperation.AbortOption;
+import com.mysql.cluster.ndbj.NdbTransaction;
+import com.mysql.cluster.ndbj.NdbTransaction.ExecType;
+
+
+/**
+ * This class represents abstract transaction.
+ */
+public class AbstractTransaction {
+
+  private Ndb ndb;
+
+  private NdbTransaction ndbTxn;
+
+  private NdbTransaction ndbDATxn;
+
+  private RootContainer rootContainer;
+
+  /**
+   * Default constructor.
+   * @param rootContainer root container to associate transaction with.
+   */
+  public AbstractTransaction(RootContainer rootContainer) {
+    this.ndb = null;
+    this.ndbTxn = null;
+    this.ndbDATxn = null;
+    this.rootContainer = rootContainer;
+  }
+
+  /**
+   * Get Ndb handle associated with this abstract transaction.
+   * @return Ndb handle.
+   */
+  public Ndb getNdb()
+  {
+    if (ndb == null) {
+      ndb = rootContainer.getNDB();
+    }
+
+    return ndb;
+  }
+
+  /**
+   * Get transaction.
+   * @return A transaction handle.
+   * @throws NdbApiException If an error occurs while attempting to begin
+   * a new transaction.
+   */
+  public NdbTransaction getNdbTransaction()
+      throws NdbApiException
+  {
+    if (ndb == null) {
+      ndb = rootContainer.getNDB();
+    }
+    if (ndbTxn == null) {
+      ndbTxn = ndb.startTransaction();
+    }
+
+    return ndbTxn;
+  }
+
+  /**
+   * Get DA transaction.
+   * @param tableName table name for DA.
+   * @param partitionKey partition key for DA.
+   * @return A transaction handle.
+   * @throws NdbApiException If an error occurs while attempting to begin
+   * a new transaction.
+   */
+  public NdbTransaction
+  getNdbDATransaction(String tableName, long partitionKey)
+    throws NdbApiException
+  {
+    if (ndb == null) {
+      ndb = rootContainer.getNDB();
+    }
+    if (ndbDATxn == null) {
+      ndbDATxn = ndb.startTransactionBig(tableName, partitionKey);
+    }
+
+    return ndbDATxn;
+  }
+
+  /**
+   * Commit transaction.
+   * <p>
+   * Both underlying NDB transactions (if any) are always closed and
+   * the Ndb handle is released back to the root container, even when
+   * the commit of the DA transaction fails.
+   * @throws NdbApiException If an error occurs while attempting to commit
+   * the transaction.
+   */
+  public void commit()
+    throws NdbApiException {
+    boolean daCommitted = false;
+    try {
+      if (ndbDATxn != null) {
+        try {
+          ndbDATxn.execute(ExecType.Commit, AbortOption.AbortOnError, true);
+        } finally {
+          ndbDATxn.close();
+        }
+      }
+      daCommitted = true;
+      if (ndbTxn != null) {
+        try {
+          ndbTxn.execute(ExecType.Commit, AbortOption.AbortOnError, true);
+        } finally {
+          ndbTxn.close();
+          ndbTxn = null;
+        }
+      }
+    } finally {
+      // If the DA transaction failed, the regular transaction has not
+      // been executed: close it here so it is rolled back rather than
+      // leaked when the references are cleared below.
+      if (!daCommitted && ndbTxn != null) {
+        ndbTxn.close();
+      }
+      if (ndb != null) {
+        rootContainer.releaseNDB(ndb);
+      }
+      ndbDATxn = null;
+      ndbTxn = null;
+      ndb = null;
+    }
+  }
+
+  /**
+   * Execute transaction.
+   * @throws NdbApiException If an error occurs while attempting to execute
+   * the transaction.
+   */
+  public void execute()
+    throws NdbApiException {
+    if (ndbDATxn != null) {
+      ndbDATxn.execute(ExecType.NoCommit, AbortOption.AbortOnError, true);
+    }
+    if (ndbTxn != null) {
+      ndbTxn.execute(ExecType.NoCommit, AbortOption.AbortOnError, true);
+    }
+  }
+
+  /**
+   * Close transaction.
+   * <p>
+   * The regular transaction is closed even if closing the DA
+   * transaction fails, and the Ndb handle is always released.
+   * @throws NdbApiException If an error occurs while attempting to close the
+   * transaction.
+   */
+  public void close()
+    throws NdbApiException {
+    try {
+      if (ndbDATxn != null) {
+        ndbDATxn.close();
+      }
+    } finally {
+      try {
+        if (ndbTxn != null) {
+          ndbTxn.close();
+        }
+      } finally {
+        if (ndb != null) {
+          rootContainer.releaseNDB(ndb);
+        }
+        ndbDATxn = null;
+        ndbTxn = null;
+        ndb = null;
+      }
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/BackendImpl.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/BackendImpl.java
new file mode 100644
index 0000000..ced3fa5
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/BackendImpl.java
@@ -0,0 +1,2101 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+import com.mysql.cluster.ndbj.NdbApiException;
+import com.mysql.cluster.ndbj.NdbOperation;
+import java.io.IOException;
+import org.opends.messages.Message;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import java.util.*;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.concurrent.ConcurrentHashMap;
+import org.opends.server.api.Backend;
+import org.opends.server.api.MonitorProvider;
+import org.opends.server.api.AlertGenerator;
+import org.opends.server.config.ConfigException;
+import org.opends.server.core.AddOperation;
+import org.opends.server.core.DeleteOperation;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.ModifyOperation;
+import org.opends.server.core.ModifyDNOperation;
+import org.opends.server.core.SearchOperation;
+import org.opends.server.util.Validator;
+import static org.opends.server.util.StaticUtils.*;
+
+import static org.opends.messages.BackendMessages.*;
+import static org.opends.messages.NdbMessages.*;
+import static org.opends.server.loggers.ErrorLogger.logError;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.*;
+import static org.opends.server.util.ServerConstants.*;
+import org.opends.server.admin.Configuration;
+import org.opends.server.admin.server.ConfigurationChangeListener;
+import org.opends.server.admin.std.meta.GlobalCfgDefn.WorkflowConfigurationMode;
+import org.opends.server.admin.std.server.NdbBackendCfg;
+import org.opends.server.admin.std.server.NdbIndexCfg;
+import org.opends.server.backends.SchemaBackend;
+import org.opends.server.backends.ndb.importLDIF.Importer;
+import org.opends.server.core.Workflow;
+import org.opends.server.core.WorkflowImpl;
+import org.opends.server.core.networkgroups.NetworkGroup;
+import org.opends.server.types.DN;
+import org.opends.server.util.LDIFException;
+import org.opends.server.workflowelement.WorkflowElement;
+import org.opends.server.workflowelement.ndb.NDBWorkflowElement;
+
+/**
+ * This is an implementation of a Directory Server Backend which stores
+ * entries in MySQL Cluster NDB database engine.
+ */
+public class BackendImpl
+    extends Backend
+    implements ConfigurationChangeListener<NdbBackendCfg>, AlertGenerator
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+  /**
+    * The fully-qualified name of this class.
+    */
+  private static final String CLASS_NAME =
+        "org.opends.server.backends.ndb.BackendImpl";
+
+
+  /**
+   * The configuration of this NDB backend.
+   */
+  private NdbBackendCfg cfg;
+
+  /**
+   * The root container to use for this backend.
+   */
+  private RootContainer rootContainer;
+
+  /**
+   * A count of the total operation threads currently in the backend.
+   */
+  private AtomicInteger threadTotalCount = new AtomicInteger(0);
+
+  /**
+   * A count of the write operation threads currently in the backend.
+   */
+  private AtomicInteger threadWriteCount = new AtomicInteger(0);
+
+  /**
+   * A list of monitor providers created for this backend instance.
+   */
+  private ArrayList<MonitorProvider<?>> monitorProviders =
+      new ArrayList<MonitorProvider<?>>();
+
+  /**
+   * The base DNs defined for this backend instance.
+   */
+  private DN[] baseDNs;
+
+  /**
+   * The mysqld connection object.
+   */
+  private Connection sqlConn;
+
+  /**
+   * The controls supported by this backend.
+   */
+  private static HashSet<String> supportedControls;
+
+  /**
+   * Database name.
+   */
+  protected static String DATABASE_NAME;
+
+  /**
+   * Attribute column length.
+   */
+  private static int ATTRLEN;
+
+  /**
+   * Attribute column length string.
+   */
+  private static String ATTRLEN_STRING;
+
+  /**
+   * NDB Max Row Size known.
+   */
+  private static final int NDB_MAXROWSIZE = 8052;
+
+  /**
+   * Number of rDN components supported.
+   */
+  protected static final int DN2ID_DN_NC = 16;
+
+  /**
+   * Number of times to retry NDB transaction.
+   */
+  protected static int TXN_RETRY_LIMIT = 0;
+
+  /**
+   * DN2ID table.
+   */
+  protected static final String DN2ID_TABLE = "DS_dn2id";
+
+  /**
+   * NEXTID autoincrement table.
+   */
+  protected static final String NEXTID_TABLE = "DS_nextid";
+
+  /**
+   * Operational Attributes table.
+   */
+  protected static final String OPATTRS_TABLE = "DS_opattrs";
+
+  /**
+   * Operational Attributes table.
+   */
+  protected static final String TAGS_TABLE = "DS_tags";
+
+  /**
+   * Index table prefix.
+   */
+  protected static final String IDX_TABLE_PREFIX = "DS_idx_";
+
+  /**
+   * Referrals table.
+   */
+  protected static final String REFERRALS_TABLE = "referral";
+
+  /**
+   * Name prefix for server specific objectclasses.
+   */
+  private static final String DSOBJ_NAME_PREFIX = "ds-";
+
+  /**
+   * EID column name.
+   */
+  protected static final String EID = "eid";
+
+  /**
+   * MID column name.
+   */
+  protected static final String MID = "mid";
+
+  /**
+   * DN column name prefix.
+   */
+  protected static final String DN2ID_DN = "a";
+
+  /**
+   * OC column name.
+   */
+  protected static final String DN2ID_OC = "object_classes";
+
+  /**
+   * Extensible OC column name.
+   */
+  protected static final String DN2ID_XOC = "x_object_classes";
+
+  /**
+   * Attribute column name.
+   */
+  protected static final String TAG_ATTR = "attr";
+
+  /**
+   * Tags column name.
+   */
+  protected static final String TAG_TAGS = "tags";
+
+  /**
+   * Value column name.
+   */
+  protected static final String IDX_VAL = "value";
+
+  /**
+   * Attribute name to lowercase name map.
+   */
+  protected static Map<String, String> attrName2LC;
+
+  /**
+   * Set of blob attribute names.
+   */
+  protected static Set<String> blobAttributes;
+
+  /**
+   * List of operational attribute names.
+   */
+  protected static List<String> operationalAttributes;
+
+  /**
+   * List of index names.
+   */
+  protected static List<String> indexes;
+
+  /**
+   * Attribute name to ObjectClass name/s map.
+   */
+  protected static Map<String, String> attr2Oc;
+
+  /**
+   * Entry DN to Transaction Map to track entry locking.
+   */
+  protected static Map<DN, AbstractTransaction> lockMap;
+
+  /**
+   * The features supported by this backend.
+   */
+  private static HashSet<String> supportedFeatures;
+
+  static
+  {
+    // Set our supported controls.
+    supportedControls = new HashSet<String>();
+    supportedControls.add(OID_MANAGE_DSAIT_CONTROL);
+    supportedControls.add(OID_SUBTREE_DELETE_CONTROL);
+
+    // Shared schema-mapping and locking state, populated during backend
+    // initialization and consulted by operation threads.
+    attrName2LC = new HashMap<String, String>();
+    operationalAttributes = new ArrayList<String>();
+    blobAttributes = new HashSet<String>();
+    indexes = new ArrayList<String>();
+    attr2Oc = new HashMap<String, String>();
+    // Concurrent map: accessed by multiple operation threads to track
+    // per-entry transaction locks.
+    lockMap = new ConcurrentHashMap<DN, AbstractTransaction>();
+
+    // Set supported features.
+    supportedFeatures = new HashSet<String>();
+  }
+
+
+
+  /**
+   * Record entry of a thread performing a read against the database.
+   */
+  private void readerBegin()
+  {
+    threadTotalCount.incrementAndGet();
+  }
+
+
+
+  /**
+   * Record exit of a thread that performed a read against the database.
+   */
+  private void readerEnd()
+  {
+    threadTotalCount.decrementAndGet();
+  }
+
+
+
+  /**
+   * Record entry of a thread performing a write against the database,
+   * bumping both the total and the writer counters.
+   */
+  private void writerBegin()
+  {
+    threadTotalCount.incrementAndGet();
+    threadWriteCount.incrementAndGet();
+  }
+
+
+
+  /**
+   * Record exit of a thread that performed a write against the database,
+   * decrementing the counters in the reverse order of writerBegin().
+   */
+  private void writerEnd()
+  {
+    threadWriteCount.decrementAndGet();
+    threadTotalCount.decrementAndGet();
+  }
+
+
+
+  /**
+   * Wait until there are no more threads accessing the database. It is assumed
+   * that new threads have been prevented from entering the database at the time
+   * this method is called.
+   */
+  private void waitUntilQuiescent()
+  {
+    boolean interrupted = false;
+    while (threadTotalCount.get() > 0)
+    {
+      // Still have threads in the database so sleep a little
+      try
+      {
+        Thread.sleep(500);
+      }
+      catch (InterruptedException e)
+      {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+        // Keep waiting for quiescence, but remember the interrupt so the
+        // thread's interrupt status can be restored afterwards instead of
+        // being silently swallowed.
+        interrupted = true;
+      }
+    }
+    if (interrupted)
+    {
+      // Restore the interrupt status for callers further up the stack.
+      Thread.currentThread().interrupt();
+    }
+  }
+
+
+
+  /**
+   * Get suggested minimum upper bound for given attribute type, parsed
+   * from a "{n}" length suffix in the schema definition.
+   * @param attrType attribute type
+   * @return suggested upper bound
+   *         or 0 if none suggested.
+   */
+  private int getAttributeBound(AttributeType attrType) {
+    // HACK: This should be done by Directory Server
+    // Schema parser and available in AttributeSyntax.
+    String attrDefinition = attrType.getDefinition();
+    if (attrDefinition == null) {
+      return 0;
+    }
+    int boundOpenIndex = attrDefinition.indexOf('{');
+    if (boundOpenIndex == -1) {
+      return 0;
+    }
+    // Search for the closing brace after the opening one so a stray '}'
+    // earlier in the definition cannot produce a bogus range.
+    int boundCloseIndex = attrDefinition.indexOf('}', boundOpenIndex + 1);
+    if (boundCloseIndex == -1) {
+      return 0;
+    }
+    String boundString = attrDefinition.substring(
+      boundOpenIndex + 1, boundCloseIndex);
+    try {
+      return Integer.parseInt(boundString);
+    } catch (NumberFormatException ex) {
+      // Malformed bound (e.g. "{abc}"): treat as no suggested bound.
+      return 0;
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public void configureBackend(Configuration cfg)
+      throws ConfigException
+  {
+    Validator.ensureNotNull(cfg);
+    Validator.ensureTrue(cfg instanceof NdbBackendCfg);
+
+    this.cfg = (NdbBackendCfg)cfg;
+
+    Set<DN> dnSet = this.cfg.getBaseDN();
+    baseDNs = new DN[dnSet.size()];
+    dnSet.toArray(baseDNs);
+
+    // These settings are static, class-wide state shared by all
+    // instances; assign them as such rather than through 'this', which
+    // misleadingly suggested per-instance fields.
+    DATABASE_NAME = this.cfg.getNdbDbname();
+    ATTRLEN = this.cfg.getNdbAttrLen();
+    ATTRLEN_STRING = Integer.toString(ATTRLEN);
+    TXN_RETRY_LIMIT = this.cfg.getDeadlockRetryLimit();
+
+    // Record the configured BLOB-mapped attribute names.
+    blobAttributes.addAll(this.cfg.getNdbAttrBlob());
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public void initializeBackend()
+      throws ConfigException, InitializationException
+  {
+    // Checksum this db environment and register its offline state id/checksum.
+    DirectoryServer.registerOfflineBackendStateID(this.getBackendID(), 0);
+
+    // Load MySQL JDBC driver.
+    try {
+      // The newInstance() call is a work around for some
+      // broken Java implementations
+      Class.forName("com.mysql.jdbc.Driver").newInstance();
+    } catch (Exception ex) {
+      throw new InitializationException(
+        ERR_NDB_DATABASE_EXCEPTION.get(ex.getMessage()));
+    }
+
+    // Get MySQL connection.
+    try {
+      sqlConn =
+        DriverManager.getConnection("jdbc:mysql://" +
+        cfg.getSqlConnectString(),
+        cfg.getSqlUser(), cfg.getSqlPasswd());
+    } catch (SQLException ex) {
+      throw new InitializationException(
+        ERR_NDB_DATABASE_EXCEPTION.get(ex.getMessage()));
+    }
+
+    // Initialize and select the database.
+    executeSQLStatement("CREATE DATABASE IF NOT EXISTS " + DATABASE_NAME,
+      "CREATE DATABASE failed: ");
+    executeSQLStatement("USE " + DATABASE_NAME, "USE failed: ");
+
+    // Log a message indicating that database init
+    // and schema bootstrapping are about to start.
+    logError(NOTE_NDB_BOOTSTRAP_SCHEMA.get());
+
+    // Initialize dn2id table: one VARCHAR column per supported RDN
+    // component (DN2ID_DN_NC of them, named a0..aN), which together form
+    // the primary key; eid is unique per entry.
+    StringBuilder dn2idBuffer = new StringBuilder();
+    dn2idBuffer.append("CREATE TABLE IF NOT EXISTS ");
+    dn2idBuffer.append(DN2ID_TABLE);
+    dn2idBuffer.append("(eid bigint unsigned NOT NULL, ");
+    dn2idBuffer.append("object_classes VARCHAR(1024) NOT NULL, ");
+    dn2idBuffer.append("x_object_classes VARCHAR(1024) NOT NULL, ");
+    for (int i = 0; i < DN2ID_DN_NC; i++) {
+      dn2idBuffer.append(DN2ID_DN);
+      dn2idBuffer.append(i);
+      dn2idBuffer.append(" VARCHAR(");
+      dn2idBuffer.append(ATTRLEN_STRING);
+      dn2idBuffer.append(") NOT NULL DEFAULT '', ");
+    }
+    dn2idBuffer.append("PRIMARY KEY (");
+    for (int i = 0; i < DN2ID_DN_NC; i++) {
+      if (i > 0) {
+        dn2idBuffer.append(", ");
+      }
+      dn2idBuffer.append(DN2ID_DN);
+      dn2idBuffer.append(i);
+    }
+    dn2idBuffer.append("), UNIQUE KEY eid (eid)) ENGINE=ndb");
+    executeSQLStatement(dn2idBuffer.toString(),
+      "CREATE TABLE " + DN2ID_TABLE + " failed: ");
+
+    // Initialize nextid autoincrement table.
+    executeSQLStatement("CREATE TABLE IF NOT EXISTS " +
+      NEXTID_TABLE + "(" +
+      "a bigint unsigned AUTO_INCREMENT PRIMARY KEY" +
+      ") ENGINE=ndb",
+      "CREATE TABLE " + NEXTID_TABLE + " failed: ");
+
+    // Set schema read only.
+    SchemaBackend schemaBackend =
+      (SchemaBackend) DirectoryServer.getBackend("schema");
+    if (schemaBackend != null) {
+      schemaBackend.setWritabilityMode(WritabilityMode.DISABLED);
+    }
+
+    // Set defined attributes: build the lowercase-name map and collect
+    // the operational attributes.
+    Map<String, AttributeType> attrTypesMap =
+      DirectoryServer.getSchema().getAttributeTypes();
+    for (AttributeType attrType : attrTypesMap.values()) {
+      String attrName = attrType.getNameOrOID();
+      attrName2LC.put(attrName, attrName.toLowerCase());
+      // Skip over server specific object classes.
+      // FIXME: this is not clean.
+      if (attrName.startsWith(DSOBJ_NAME_PREFIX)) {
+        continue;
+      }
+      if ((attrType.getUsage() == AttributeUsage.DIRECTORY_OPERATION) &&
+          !operationalAttributes.contains(attrName)) {
+        operationalAttributes.add(attrName);
+      }
+    }
+    // Strip virtual attributes: they are computed, not stored.
+    for (VirtualAttributeRule rule :
+      DirectoryServer.getVirtualAttributes())
+    {
+      operationalAttributes.remove(rule.getAttributeType().getNameOrOID());
+    }
+
+    // Initialize objectClass tables: one table per concrete objectclass,
+    // with one column per required/optional attribute.
+    // TODO: dynamic schema validation and adjustment.
+    Map<String,ObjectClass> objectClasses =
+      DirectoryServer.getSchema().getObjectClasses();
+
+    for (ObjectClass oc : objectClasses.values()) {
+      String ocName = oc.getNameOrOID();
+
+      // Abstract classes carry no stored values, hence no table.
+      if (oc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+        continue;
+      }
+
+      // Skip over server specific object classes.
+      // FIXME: this is not clean.
+      if (ocName.startsWith(DSOBJ_NAME_PREFIX)) {
+        continue;
+      }
+
+      StringBuilder attrsBuffer = new StringBuilder();
+      int nColumns = appendAttributeColumns(attrsBuffer,
+        oc.getRequiredAttributes(), ocName, 0);
+      nColumns = appendAttributeColumns(attrsBuffer,
+        oc.getOptionalAttributes(), ocName, nColumns);
+
+      if (attrsBuffer.length() != 0) {
+        attrsBuffer.append(", PRIMARY KEY(eid, mid))");
+      }
+
+      String attrsString = attrsBuffer.toString();
+
+      executeSQLStatement("CREATE TABLE IF NOT EXISTS " +
+        "`" + ocName + "`" + " (" +
+        "eid bigint unsigned NOT NULL, " +
+        "mid int unsigned NOT NULL" +
+        (attrsString.length() != 0 ? ", " +
+         attrsString : ", PRIMARY KEY(eid, mid))") +
+        " ENGINE=ndb PARTITION BY KEY(eid)",
+        "CREATE TABLE " + ocName + " failed: ");
+    }
+
+    // Initialize operational attributes table: one VARCHAR column per
+    // non-virtual operational attribute collected above.
+    int nColumns = 0;
+    StringBuilder attrsBuffer = new StringBuilder();
+
+    for (String attrName : operationalAttributes) {
+      if (nColumns > 0) {
+        attrsBuffer.append(", ");
+      }
+      attrsBuffer.append("`");
+      attrsBuffer.append(attrName);
+      attrsBuffer.append("`");
+      attrsBuffer.append(" VARCHAR(");
+      attrsBuffer.append(ATTRLEN_STRING);
+      attrsBuffer.append(")");
+      nColumns++;
+    }
+
+    if (attrsBuffer.length() != 0) {
+      attrsBuffer.append(", PRIMARY KEY(eid))");
+    }
+
+    String attrsString = attrsBuffer.toString();
+
+    executeSQLStatement("CREATE TABLE IF NOT EXISTS " +
+      "`" + OPATTRS_TABLE + "`" + " (" +
+      "eid bigint unsigned NOT NULL" +
+      (attrsString.length() != 0 ? ", " +
+      attrsString : ", PRIMARY KEY(eid))") +
+      " ENGINE=ndb PARTITION BY KEY(eid)",
+      "CREATE TABLE " + OPATTRS_TABLE + " failed: ");
+
+    // Initialize attribute options table.
+    executeSQLStatement("CREATE TABLE IF NOT EXISTS " +
+      TAGS_TABLE + "(" +
+      "eid bigint unsigned NOT NULL, " +
+      "attr VARCHAR(" + ATTRLEN_STRING + "), " +
+      "mid int unsigned NOT NULL, " +
+      "tags VARCHAR(" + ATTRLEN_STRING + "), " +
+      "PRIMARY KEY (eid, attr, mid))" +
+      " ENGINE=ndb PARTITION BY KEY(eid)",
+      "CREATE TABLE " + TAGS_TABLE + " failed: ");
+
+    // Initialize configured indexes.
+    for (String idx : cfg.listNdbIndexes()) {
+      NdbIndexCfg indexCfg = cfg.getNdbIndex(idx);
+      // TODO: Substring indexes.
+      AttributeType attrType = indexCfg.getAttribute();
+      String attrName = attrType.getNameOrOID();
+      indexes.add(attrName);
+      executeSQLStatement("CREATE TABLE IF NOT EXISTS " +
+        IDX_TABLE_PREFIX + attrName + "(" +
+        "eid bigint unsigned NOT NULL, " +
+        "mid int unsigned NOT NULL, " +
+        "value VARCHAR(" + ATTRLEN_STRING + "), " +
+        "PRIMARY KEY (eid, mid), " +
+        "KEY value (value)" +
+        ") ENGINE=ndb PARTITION BY KEY(eid)",
+        "CREATE TABLE " + IDX_TABLE_PREFIX + attrName + " failed: ");
+    }
+
+    // Open Root Container.
+    if (rootContainer == null) {
+      rootContainer = initializeRootContainer();
+    }
+
+    try
+    {
+      // Log an informational message about the number of entries.
+      Message message = NOTE_NDB_BACKEND_STARTED.get(
+          cfg.getBackendId(), rootContainer.getEntryCount());
+      logError(message);
+    }
+    catch(NdbApiException ex)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, ex);
+      }
+      Message message =
+          WARN_NDB_GET_ENTRY_COUNT_FAILED.get(ex.getMessage());
+      throw new InitializationException(message, ex);
+    }
+
+    // Register base DNs and workflows under manual workflow
+    // configuration, restoring the previous mode afterwards.
+    WorkflowConfigurationMode workflowConfigMode =
+      DirectoryServer.getWorkflowConfigurationMode();
+    DirectoryServer.setWorkflowConfigurationMode(
+      WorkflowConfigurationMode.MANUAL);
+
+    for (DN dn : cfg.getBaseDN())
+    {
+      try
+      {
+        DirectoryServer.registerBaseDN(dn, this, false);
+        WorkflowImpl workflowImpl = createWorkflow(dn);
+        registerWorkflowWithDefaultNetworkGroup(workflowImpl);
+      }
+      catch (Exception e)
+      {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+
+        Message message = ERR_BACKEND_CANNOT_REGISTER_BASEDN.get(
+            String.valueOf(dn), String.valueOf(e));
+        throw new InitializationException(message, e);
+      }
+    }
+
+    DirectoryServer.setWorkflowConfigurationMode(workflowConfigMode);
+
+    // Register as an AlertGenerator.
+    DirectoryServer.registerAlertGenerator(this);
+    // Register this backend as a change listener.
+    cfg.addNdbChangeListener(this);
+  }
+
+
+
+  /**
+   * Execute a single DDL/utility SQL statement on the mysqld connection,
+   * releasing the statement in all cases.
+   *
+   * @param sql        The SQL text to execute.
+   * @param errContext Prefix describing the failed operation, used to
+   *                   build the initialization error message.
+   * @throws InitializationException If the statement fails.
+   */
+  private void executeSQLStatement(String sql, String errContext)
+      throws InitializationException
+  {
+    Statement stmt = null;
+    try {
+      stmt = sqlConn.createStatement();
+      stmt.execute(sql);
+    } catch (SQLException ex) {
+      throw new InitializationException(
+        ERR_NDB_DATABASE_EXCEPTION.get(errContext + ex.getMessage()));
+    } finally {
+      // release resources.
+      if (stmt != null) {
+        try {
+          stmt.close();
+        } catch (SQLException ex) {
+          // Ignore: the statement is being discarded anyway.
+        }
+      }
+    }
+  }
+
+
+
+  /**
+   * Append column definitions for the given attribute types to the
+   * CREATE TABLE buffer of an objectclass table, and record the first
+   * objectclass owning each attribute in attr2Oc.
+   *
+   * @param attrsBuffer Buffer accumulating the column definitions.
+   * @param attrs       Attribute types to append columns for.
+   * @param ocName      Name of the objectclass table being built.
+   * @param nColumns    Number of columns already appended.
+   * @return The updated column count.
+   */
+  private int appendAttributeColumns(StringBuilder attrsBuffer,
+      Set<AttributeType> attrs, String ocName, int nColumns)
+  {
+    for (AttributeType attrType : attrs) {
+      String attrName = attrType.getNameOrOID();
+      if (nColumns > 0) {
+        attrsBuffer.append(", ");
+      }
+      attrsBuffer.append("`");
+      attrsBuffer.append(attrName);
+      attrsBuffer.append("`");
+      if (blobAttributes.contains(attrName)) {
+        attrsBuffer.append(" BLOB");
+      } else {
+        attrsBuffer.append(" VARCHAR(");
+        // Use the schema-suggested bound when it fits in an NDB row,
+        // otherwise fall back to the configured default length.
+        int attrBound = getAttributeBound(attrType);
+        if ((attrBound > 0) && (attrBound < NDB_MAXROWSIZE)) {
+          attrsBuffer.append(Integer.toString(attrBound));
+        } else {
+          attrsBuffer.append(ATTRLEN_STRING);
+        }
+        attrsBuffer.append(")");
+      }
+      if (!attr2Oc.containsKey(attrName)) {
+        attr2Oc.put(attrName, ocName);
+      }
+      nColumns++;
+    }
+    return nColumns;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public void finalizeBackend()
+  {
+    // Deregister as a change listener.
+    cfg.removeNdbChangeListener(this);
+
+    // Switch to manual workflow configuration while tearing down the
+    // base DN registrations; the previous mode is restored below.
+    WorkflowConfigurationMode workflowConfigMode =
+      DirectoryServer.getWorkflowConfigurationMode();
+    DirectoryServer.setWorkflowConfigurationMode(
+      WorkflowConfigurationMode.MANUAL);
+
+    // Deregister our base DNs.
+    // NOTE(review): assumes initializeBackend() completed successfully;
+    // if rootContainer were null here this would NPE -- confirm lifecycle.
+    for (DN dn : rootContainer.getBaseDNs())
+    {
+      try
+      {
+        DirectoryServer.deregisterBaseDN(dn);
+        deregisterWorkflowWithDefaultNetworkGroup(dn);
+      }
+      catch (Exception e)
+      {
+        // Best-effort teardown: log at debug level and keep going so the
+        // remaining base DNs are still deregistered.
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+      }
+    }
+
+    DirectoryServer.setWorkflowConfigurationMode(workflowConfigMode);
+
+    // Deregister our monitor providers.
+    for (MonitorProvider<?> monitor : monitorProviders)
+    {
+      DirectoryServer.deregisterMonitorProvider(
+           monitor.getMonitorInstanceName().toLowerCase());
+    }
+    monitorProviders = new ArrayList<MonitorProvider<?>>();
+
+    // We presume the server will prevent more operations coming into this
+    // backend, but there may be existing operations already in the
+    // backend. We need to wait for them to finish.
+    waitUntilQuiescent();
+
+    // Close the database.
+    try
+    {
+      rootContainer.close();
+      rootContainer = null;
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      Message message = ERR_NDB_DATABASE_EXCEPTION.get(e.getMessage());
+      logError(message);
+    }
+
+    // Close the mysqld JDBC connection opened in initializeBackend().
+    try {
+      if (sqlConn != null) {
+        sqlConn.close();
+      }
+    } catch (SQLException e) {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      Message message = ERR_NDB_DATABASE_EXCEPTION.get(e.getMessage());
+      logError(message);
+    }
+
+    // Checksum this db environment and register its offline state id/checksum.
+    DirectoryServer.registerOfflineBackendStateID(this.getBackendID(), 0);
+
+    //Deregister the alert generator.
+    DirectoryServer.deregisterAlertGenerator(this);
+
+    // Make sure the thread counts are zero for next initialization.
+    threadTotalCount.set(0);
+    threadWriteCount.set(0);
+
+    // Log an informational message.
+    Message message = NOTE_BACKEND_OFFLINE.get(cfg.getBackendId());
+    logError(message);
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public boolean isLocal()
+  {
+    // This backend stores and serves entries itself rather than proxying
+    // requests to another server.
+    return true;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public boolean isIndexed(AttributeType attributeType, IndexType indexType)
+  {
+    // Substring indexing NYI; any other index type is considered indexed
+    // whenever an NDB index table is configured for the attribute.
+    return (indexType != IndexType.SUBSTRING)
+        && indexes.contains(attributeType.getNameOrOID());
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public boolean supportsLDIFExport()
+  {
+    return true;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public boolean supportsLDIFImport()
+  {
+    return true;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public boolean supportsBackup()
+  {
+    // Backup/restore is not implemented for the NDB backend.
+    return false;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public boolean supportsBackup(BackupConfig backupConfig,
+                                StringBuilder unsupportedReason)
+  {
+    // No backup configuration is supported, regardless of its settings.
+    return false;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public boolean supportsRestore()
+  {
+    return false;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public HashSet<String> getSupportedFeatures()
+  {
+    // NOTE(review): returns the shared static set without copying;
+    // callers are trusted not to modify it.
+    return supportedFeatures;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public HashSet<String> getSupportedControls()
+  {
+    // NOTE(review): returns the shared static set without copying;
+    // callers are trusted not to modify it.
+    return supportedControls;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public DN[] getBaseDNs()
+  {
+    // NOTE(review): exposes the internal array; callers are trusted not
+    // to modify it.
+    return baseDNs;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public long getEntryCount()
+  {
+    // -1 signals that the count is unavailable.
+    if (rootContainer == null)
+    {
+      return -1;
+    }
+    try
+    {
+      return rootContainer.getEntryCount();
+    }
+    catch (Exception e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      return -1;
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public ConditionResult hasSubordinates(DN entryDN)
+         throws DirectoryException
+  {
+    long ret = numSubordinates(entryDN, false);
+    if(ret < 0)
+    {
+      return ConditionResult.UNDEFINED;
+    }
+    else if(ret == 0)
+    {
+      return ConditionResult.FALSE;
+    }
+    else
+    {
+      return ConditionResult.TRUE;
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public long numSubordinates(DN entryDN, boolean subtree)
+      throws DirectoryException
+  {
+    // NYI. Subordinate counting is not implemented for the NDB backend;
+    // -1 tells callers the count is undefined.
+    return -1;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public Entry getEntry(DN entryDN) throws DirectoryException
+  {
+    EntryContainer ec;
+    if (rootContainer != null)
+    {
+      ec = rootContainer.getEntryContainer(entryDN);
+    }
+    else
+    {
+      Message message = ERR_ROOT_CONTAINER_NOT_INITIALIZED.get(getBackendID());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+              message);
+    }
+
+    // Enter the reader count only once the finally block below is
+    // guaranteed to run: the original code incremented it before the
+    // root-container check, so the throw above leaked the count and
+    // waitUntilQuiescent() would spin forever during shutdown.
+    readerBegin();
+    ec.sharedLock.lock();
+    Entry entry;
+    try
+    {
+      entry = ec.getEntry(entryDN);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw createDirectoryException(e);
+    }
+    catch (NDBException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   e.getMessageObject());
+    }
+    finally
+    {
+      ec.sharedLock.unlock();
+      readerEnd();
+    }
+
+    return entry;
+  }
+
+
+
+  /**
+   * Retrieves the requested entry from this backend.  Note that the
+   * caller must hold a read or write lock on the specified DN. Note
+   * that the lock is held after this method has completed execution.
+   *
+   * @param  entryDN  The distinguished name of the entry to retrieve.
+   * @param  txn      Abstract transaction for this operation.
+   * @param  lockMode Lock mode for this operation.
+   *
+   * @return  The requested entry, or {@code null} if the entry does
+   *          not exist.
+   *
+   * @throws  DirectoryException  If a problem occurs while trying to
+   *                              retrieve the entry.
+   */
+  public Entry getEntryNoCommit(DN entryDN, AbstractTransaction txn,
+    NdbOperation.LockMode lockMode) throws DirectoryException
+  {
+    // Resolve the entry container before entering the reader scope so that
+    // an uninitialized backend does not leave the reader count unbalanced
+    // (previously readerBegin() was called before this check, and the throw
+    // path never reached readerEnd()).
+    EntryContainer ec;
+    if (rootContainer != null)
+    {
+      ec = rootContainer.getEntryContainer(entryDN);
+    }
+    else
+    {
+      Message message = ERR_ROOT_CONTAINER_NOT_INITIALIZED.get(getBackendID());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+              message);
+    }
+
+    readerBegin();
+    ec.sharedLock.lock();
+    Entry entry;
+    try
+    {
+      entry = ec.getEntryNoCommit(entryDN, txn, lockMode);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // Translate NDB API errors into a DirectoryException.
+      throw createDirectoryException(e);
+    }
+    catch (NDBException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   e.getMessageObject());
+    }
+    finally
+    {
+      // Always release the container lock and balance the reader count.
+      ec.sharedLock.unlock();
+      readerEnd();
+    }
+
+    return entry;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * This variant is not supported by the NDB backend: all write
+   * operations must be performed through the transactional variant
+   * {@link #addEntry(Entry, AddOperation, AbstractTransaction)}.
+   *
+   * @throws DirectoryException always, with result code
+   *                            UNWILLING_TO_PERFORM.
+   */
+  @Override()
+  public void addEntry(Entry entry, AddOperation addOperation)
+      throws DirectoryException, CanceledOperationException
+  {
+    // Report a meaningful "not supported" result instead of wrapping an
+    // UnsupportedOperationException (whose message is null) as an internal
+    // database error.  This matches the unsupported-operation path used by
+    // the transactional renameEntry().
+    Message message = WARN_NDB_FUNCTION_NOT_SUPPORTED.get();
+    throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
+  }
+
+
+
+  /**
+   * Adds the provided entry to this backend.  This method must ensure
+   * that the entry is appropriate for the backend and that no entry
+   * already exists with the same DN.  The caller must hold a write
+   * lock on the DN of the provided entry.
+   *
+   * @param  entry         The entry to add to this backend.
+   * @param  addOperation  The add operation with which the new entry
+   *                       is associated.  This may be {@code null}
+   *                       for adds performed internally.
+   * @param  txn           Abstract transaction for this operation.
+   *
+   * @throws DirectoryException  If a problem occurs while trying to
+   *                             add the entry.
+   *
+   * @throws CanceledOperationException  If this backend noticed and
+   *                                       reacted to a request to
+   *                                       cancel or abandon the add
+   *                                       operation.
+   */
+  public void addEntry(Entry entry, AddOperation addOperation,
+    AbstractTransaction txn)
+      throws DirectoryException, CanceledOperationException
+  {
+    DN entryDN = entry.getDN();
+
+    // Resolve the entry container before entering the writer scope so that
+    // an uninitialized backend does not leave the writer count unbalanced
+    // (previously writerBegin() was called before this check, and the throw
+    // path never reached writerEnd()).
+    EntryContainer ec;
+    if (rootContainer != null)
+    {
+      ec = rootContainer.getEntryContainer(entryDN);
+    }
+    else
+    {
+      Message message = ERR_ROOT_CONTAINER_NOT_INITIALIZED.get(getBackendID());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+              message);
+    }
+
+    writerBegin();
+    ec.sharedLock.lock();
+    try
+    {
+      ec.addEntry(entry, addOperation, txn);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // Translate NDB API errors into a DirectoryException.
+      throw createDirectoryException(e);
+    }
+    catch (NDBException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   e.getMessageObject());
+    }
+    finally
+    {
+      // Always release the container lock and balance the writer count.
+      ec.sharedLock.unlock();
+      writerEnd();
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * This variant is not supported by the NDB backend: all write
+   * operations must be performed through the transactional variant
+   * {@link #deleteEntry(DN, Entry, DeleteOperation, AbstractTransaction)}.
+   *
+   * @throws DirectoryException always, with result code
+   *                            UNWILLING_TO_PERFORM.
+   */
+  @Override()
+  public void deleteEntry(DN entryDN, DeleteOperation deleteOperation)
+      throws DirectoryException, CanceledOperationException
+  {
+    // Report a meaningful "not supported" result instead of wrapping an
+    // UnsupportedOperationException (whose message is null) as an internal
+    // database error.
+    Message message = WARN_NDB_FUNCTION_NOT_SUPPORTED.get();
+    throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM, message);
+  }
+
+
+
+  /**
+   * Removes the specified entry from this backend.  This method must
+   * ensure that the entry exists and that it does not have any
+   * subordinate entries (unless the backend supports a subtree delete
+   * operation and the client included the appropriate information in
+   * the request).  The caller must hold a write lock on the provided
+   * entry DN.
+   *
+   * @param  entryDN          The DN of the entry to remove from this
+   *                          backend.
+   * @param  entry            The entry to delete.
+   * @param  deleteOperation  The delete operation with which this
+   *                          action is associated.  This may be
+   *                          {@code null} for deletes performed
+   *                          internally.
+   * @param  txn              Abstract transaction for this operation.
+   *
+   * @throws DirectoryException  If a problem occurs while trying to
+   *                             remove the entry.
+   *
+   * @throws CanceledOperationException  If this backend noticed and
+   *                                       reacted to a request to
+   *                                       cancel or abandon the
+   *                                       delete operation.
+   */
+  public void deleteEntry(DN entryDN, Entry entry,
+    DeleteOperation deleteOperation, AbstractTransaction txn)
+    throws DirectoryException, CanceledOperationException
+  {
+    // Resolve the entry container before entering the writer scope so that
+    // an uninitialized backend does not leave the writer count unbalanced
+    // (previously writerBegin() was called before this check, and the throw
+    // path never reached writerEnd()).
+    EntryContainer ec;
+    if (rootContainer != null)
+    {
+      ec = rootContainer.getEntryContainer(entryDN);
+    }
+    else
+    {
+      Message message = ERR_ROOT_CONTAINER_NOT_INITIALIZED.get(getBackendID());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+              message);
+    }
+
+    writerBegin();
+    ec.sharedLock.lock();
+    try
+    {
+      ec.deleteEntry(entryDN, entry, deleteOperation, txn);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // Translate NDB API errors into a DirectoryException.
+      throw createDirectoryException(e);
+    }
+    catch (NDBException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   e.getMessageObject());
+    }
+    finally
+    {
+      // Always release the container lock and balance the writer count.
+      ec.sharedLock.unlock();
+      writerEnd();
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public void replaceEntry(Entry oldEntry, Entry newEntry,
+    ModifyOperation modifyOperation)
+      throws DirectoryException, CanceledOperationException
+  {
+    throw createDirectoryException(new UnsupportedOperationException());
+  }
+
+
+
+  /**
+   * Replaces the specified entry with the provided entry in this
+   * backend. The backend must ensure that an entry already exists
+   * with the same DN as the provided entry. The caller must hold a
+   * write lock on the DN of the provided entry.
+   *
+   * @param oldEntry
+   *          The original entry that is being replaced.
+   * @param newEntry
+   *          The new entry to use in place of the existing entry with
+   *          the same DN.
+   * @param modifyOperation
+   *          The modify operation with which this action is
+   *          associated. This may be {@code null} for modifications
+   *          performed internally.
+   * @param txn
+   *          Abstract transaction for this operation.
+   * @throws DirectoryException
+   *           If a problem occurs while trying to replace the entry.
+   * @throws CanceledOperationException
+   *           If this backend noticed and reacted to a request to
+   *           cancel or abandon the modify operation.
+   */
+  public void replaceEntry(Entry oldEntry, Entry newEntry,
+    ModifyOperation modifyOperation, AbstractTransaction txn)
+      throws DirectoryException, CanceledOperationException
+  {
+    DN entryDN = newEntry.getDN();
+
+    // Resolve the entry container before entering the writer scope so that
+    // an uninitialized backend does not leave the writer count unbalanced
+    // (previously writerBegin() was called before this check, and the throw
+    // path never reached writerEnd()).
+    EntryContainer ec;
+    if (rootContainer != null)
+    {
+      ec = rootContainer.getEntryContainer(entryDN);
+    }
+    else
+    {
+      Message message = ERR_ROOT_CONTAINER_NOT_INITIALIZED.get(getBackendID());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+              message);
+    }
+
+    writerBegin();
+    ec.sharedLock.lock();
+    try
+    {
+      ec.replaceEntry(oldEntry, newEntry, modifyOperation, txn);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // Translate NDB API errors into a DirectoryException.
+      throw createDirectoryException(e);
+    }
+    catch (NDBException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   e.getMessageObject());
+    }
+    finally
+    {
+      // Always release the container lock and balance the writer count.
+      ec.sharedLock.unlock();
+      writerEnd();
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public void renameEntry(DN currentDN, Entry entry,
+                          ModifyDNOperation modifyDNOperation)
+      throws DirectoryException, CanceledOperationException
+  {
+    throw createDirectoryException(new UnsupportedOperationException());
+  }
+
+
+
+  /**
+   * Moves and/or renames the provided entry in this backend, altering
+   * any subordinate entries as necessary. This must ensure that an
+   * entry already exists with the provided current DN, and that no
+   * entry exists with the target DN of the provided entry. The caller
+   * must hold write locks on both the current DN and the new DN for
+   * the entry.
+   *
+   * @param currentDN
+   *          The current DN of the entry to be replaced.
+   * @param entry
+   *          The new content to use for the entry.
+   * @param modifyDNOperation
+   *          The modify DN operation with which this action is
+   *          associated. This may be {@code null} for modify DN
+   *          operations performed internally.
+   * @param txn
+   *          Abstract transaction for this operation.
+   * @throws DirectoryException
+   *           If a problem occurs while trying to perform the rename.
+   * @throws CanceledOperationException
+   *           If this backend noticed and reacted to a request to
+   *           cancel or abandon the modify DN operation.
+   */
+  public void renameEntry(DN currentDN, Entry entry,
+                          ModifyDNOperation modifyDNOperation,
+                          AbstractTransaction txn)
+      throws DirectoryException, CanceledOperationException
+  {
+    // Resolve both containers and validate the request before entering the
+    // writer scope.  Previously writerBegin() was called first, so the
+    // uninitialized-backend and cross-container throw paths never reached
+    // writerEnd(), leaving the writer count unbalanced.
+    EntryContainer currentContainer;
+    if (rootContainer != null)
+    {
+      currentContainer = rootContainer.getEntryContainer(currentDN);
+    }
+    else
+    {
+      Message message = ERR_ROOT_CONTAINER_NOT_INITIALIZED.get(getBackendID());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+              message);
+    }
+
+    EntryContainer container = rootContainer.getEntryContainer(entry.getDN());
+
+    if (currentContainer != container)
+    {
+      // FIXME: No reason why we cannot implement a move between containers
+      // since the containers share the same database environment.
+      Message msg = WARN_NDB_FUNCTION_NOT_SUPPORTED.get();
+      throw new DirectoryException(ResultCode.UNWILLING_TO_PERFORM,
+                                   msg);
+    }
+
+    writerBegin();
+    // Acquire the lock outside the try block: if lock() itself failed
+    // inside the try, the finally block would unlock a lock this thread
+    // does not hold.
+    currentContainer.sharedLock.lock();
+    try
+    {
+      currentContainer.renameEntry(currentDN, entry, modifyDNOperation, txn);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // Translate NDB API errors into a DirectoryException.
+      throw createDirectoryException(e);
+    }
+    catch (NDBException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   e.getMessageObject());
+    }
+    finally
+    {
+      // Always release the container lock and balance the writer count.
+      currentContainer.sharedLock.unlock();
+      writerEnd();
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public void search(SearchOperation searchOperation)
+      throws DirectoryException, CanceledOperationException
+  {
+    // Abort any Group or ACI bulk search operations.
+    // NOTE(review): internal group-membership update searches return
+    // immediately with no results instead of being evaluated against NDB --
+    // presumably because such bulk scans are too expensive here; confirm.
+    List<Control> requestControls = searchOperation.getRequestControls();
+    if (requestControls != null) {
+      for (Control c : requestControls) {
+        if (c.getOID().equals(OID_INTERNAL_GROUP_MEMBERSHIP_UPDATE)) {
+          return;
+        }
+      }
+    }
+
+    // Resolve the entry container for the search base; fail if the backend
+    // has not been initialized.
+    EntryContainer ec;
+    if (rootContainer != null)
+    {
+      ec = rootContainer.getEntryContainer(searchOperation.getBaseDN());
+    }
+    else
+    {
+      Message message = ERR_ROOT_CONTAINER_NOT_INITIALIZED.get(getBackendID());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+              message);
+    }
+    ec.sharedLock.lock();
+
+    readerBegin();
+
+    try
+    {
+      // Delegate the actual search processing to the entry container.
+      ec.search(searchOperation);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // Translate NDB API errors into a DirectoryException.
+      throw createDirectoryException(e);
+    }
+    catch (NDBException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      Message message = ERR_NDB_DATABASE_EXCEPTION.get(e.getMessage());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   message);
+    }
+    finally
+    {
+      // Always release the container lock and balance the reader count.
+      ec.sharedLock.unlock();
+      readerEnd();
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public void exportLDIF(LDIFExportConfig exportConfig)
+      throws DirectoryException
+  {
+    if (!DirectoryServer.isRunning()) {
+      // No offline export for now.
+      Message message = ERR_NDB_EXPORT_OFFLINE_NOT_SUPPORTED.get();
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   message);
+    }
+
+    try
+    {
+      // Open the root container on demand; the finally block below closes
+      // it again once the export completes.
+      if (rootContainer == null) {
+        rootContainer = initializeRootContainer();
+      }
+
+      ExportJob exportJob = new ExportJob(exportConfig);
+      exportJob.exportLDIF(rootContainer);
+    }
+    catch (IOException ioe)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, ioe);
+      }
+      Message message = ERR_NDB_IO_ERROR.get(ioe.getMessage());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   message);
+    }
+    catch (NdbApiException nae)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, nae);
+      }
+      throw createDirectoryException(nae);
+    }
+    catch (LDIFException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   e.getMessageObject());
+    }
+    catch (InitializationException ie)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, ie);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   ie.getMessageObject());
+    }
+    catch (ConfigException ce)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, ce);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   ce.getMessageObject());
+    }
+    finally
+    {
+      // leave the backend in the same state.
+      // NOTE(review): the container is closed even when it was already open
+      // on entry, and rootContainer is only reset to null when close()
+      // succeeds -- confirm this matches the intended lifecycle.
+      if (rootContainer != null)
+      {
+        try
+        {
+          rootContainer.close();
+          rootContainer = null;
+        }
+        catch (NdbApiException e)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, e);
+          }
+        }
+      }
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public LDIFImportResult importLDIF(LDIFImportConfig importConfig)
+      throws DirectoryException
+  {
+    if (!DirectoryServer.isRunning()) {
+      // No offline import for now.
+      Message message = ERR_NDB_IMPORT_OFFLINE_NOT_SUPPORTED.get();
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   message);
+    }
+
+    try
+    {
+      // Open the root container on demand; the finally block below closes
+      // it again once the import completes.
+      Importer importer = new Importer(importConfig);
+      if (rootContainer == null) {
+        rootContainer = initializeRootContainer();
+      }
+      return importer.processImport(rootContainer);
+    }
+    catch (IOException ioe)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, ioe);
+      }
+      Message message = ERR_NDB_IO_ERROR.get(ioe.getMessage());
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   message);
+    }
+    catch (NDBException ne)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, ne);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   ne.getMessageObject());
+    }
+    catch (InitializationException ie)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, ie);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   ie.getMessageObject());
+    }
+    catch (ConfigException ce)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, ce);
+      }
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                   ce.getMessageObject());
+    }
+    finally
+    {
+      // leave the backend in the same state.
+      // NOTE(review): the container is closed even when it was already open
+      // on entry -- confirm this matches the intended lifecycle.
+      try
+      {
+        if (rootContainer != null)
+        {
+          rootContainer.close();
+          rootContainer = null;
+        }
+      }
+      catch (NdbApiException nae)
+      {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, nae);
+        }
+      }
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * Backups are not supported by the NDB backend; this method is a
+   * silent no-op.
+   */
+  @Override()
+  public void createBackup(BackupConfig backupConfig)
+      throws DirectoryException
+  {
+    // Not supported, do nothing.
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * Backups are not supported by the NDB backend; this method is a
+   * silent no-op.
+   */
+  @Override()
+  public void removeBackup(BackupDirectory backupDirectory, String backupID)
+      throws DirectoryException
+  {
+    // Not supported, do nothing.
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * Backups are not supported by the NDB backend; this method is a
+   * silent no-op.
+   */
+  @Override()
+  public void restoreBackup(RestoreConfig restoreConfig)
+      throws DirectoryException
+  {
+    // Not supported, do nothing.
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override()
+  public boolean isConfigurationAcceptable(Configuration configuration,
+                                           List<Message> unacceptableReasons)
+  {
+    // Both entry points apply identical validation, so delegate directly
+    // to the change-acceptable handler.
+    return isConfigurationChangeAcceptable((NdbBackendCfg) configuration,
+                                           unacceptableReasons);
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * All configuration changes are currently accepted; no NDB-specific
+   * validation is performed and {@code unacceptableReasons} is never
+   * populated.
+   */
+  public boolean isConfigurationChangeAcceptable(
+      NdbBackendCfg cfg,
+      List<Message> unacceptableReasons)
+  {
+    return true;
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  public ConfigChangeResult applyConfigurationChange(NdbBackendCfg newCfg)
+  {
+    // No dynamic configuration changes are applied: always report success,
+    // no administrative action required, and no messages.
+    return new ConfigChangeResult(ResultCode.SUCCESS, false,
+                                  new ArrayList<Message>());
+  }
+
+  /**
+   * Returns a handle to the root container currently used by this backend.
+   * The rootContainer could be NULL if the backend is not initialized.
+   *
+   * @return The RootContainer object currently used by this backend, or
+   *         {@code null} if the backend is not initialized.
+   */
+  public RootContainer getRootContainer()
+  {
+    return rootContainer;
+  }
+
+  /**
+   * Creates a customized DirectoryException from the NdbApiException
+   * thrown by NDB backend.
+   *
+   * @param  e The NdbApiException to be converted.
+   * @return  DirectoryException created from exception.
+   */
+  DirectoryException createDirectoryException(Exception e)
+  {
+    String detail = e.getMessage();
+    if (detail == null)
+    {
+      // The exception carries no text; fall back to its stack trace.
+      detail = stackTraceToSingleLineString(e);
+    }
+    Message message = ERR_NDB_DATABASE_EXCEPTION.get(detail);
+    return new DirectoryException(DirectoryServer.getServerErrorResultCode(),
+                                  message, e);
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public String getClassName()
+  {
+    // CLASS_NAME is the fully-qualified class name constant declared by
+    // this backend.
+    return CLASS_NAME;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public LinkedHashMap<String,String> getAlerts()
+  {
+    // This backend generates a single alert type, raised when the backend
+    // environment becomes unusable.
+    LinkedHashMap<String,String> alertMap =
+        new LinkedHashMap<String,String>();
+    alertMap.put(ALERT_TYPE_BACKEND_ENVIRONMENT_UNUSABLE,
+                 ALERT_DESCRIPTION_BACKEND_ENVIRONMENT_UNUSABLE);
+    return alertMap;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public DN getComponentEntryDN()
+  {
+    // The component entry is this backend's own configuration entry.
+    return cfg.dn();
+  }
+
+  /**
+   * Opens a new root container for this backend using its current
+   * configuration.
+   *
+   * @return The newly opened root container.
+   *
+   * @throws ConfigException          If a configuration problem prevents
+   *                                  the container from being created.
+   * @throws InitializationException  If the database environment cannot
+   *                                  be opened.
+   */
+  private RootContainer initializeRootContainer()
+      throws ConfigException, InitializationException
+  {
+    try
+    {
+      // Open the database environment.
+      RootContainer container = new RootContainer(this, cfg);
+      container.open();
+      return container;
+    }
+    catch (Exception e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      throw new InitializationException(
+          ERR_NDB_OPEN_ENV_FAIL.get(e.getMessage()), e);
+    }
+  }
+
+  /**
+   * {@inheritDoc}
+   * <p>
+   * Entry cache preloading is not supported by the NDB backend.
+   *
+   * @throws UnsupportedOperationException always.
+   */
+  public void preloadEntryCache() throws
+    UnsupportedOperationException
+  {
+    throw new UnsupportedOperationException("Operation not supported");
+  }
+
+  /**
+   * Creates one workflow for a given base DN in a backend.
+   *
+   * @param baseDN   the base DN of the workflow to create
+   *
+   * @return the newly created workflow
+   *
+   * @throws DirectoryException  If the workflow ID for the provided
+   *                             workflow conflicts with the workflow
+   *                             ID of an existing workflow.
+   */
+  private WorkflowImpl createWorkflow(DN baseDN) throws DirectoryException
+  {
+    String backendID = this.getBackendID();
+
+    // Create a root workflow element to encapsulate the backend
+    NDBWorkflowElement rootWE =
+        NDBWorkflowElement.createAndRegister(backendID, this);
+
+    // The workflow ID is "backendID + baseDN".
+    // We cannot use backendID as workflow identifier because a backend
+    // may handle several base DNs. We cannot use baseDN either because
+    // we might want to configure several workflows handling the same
+    // baseDN through different network groups. So a mix of both
+    // backendID and baseDN should be ok.
+    String workflowID = backendID + "#" + baseDN.toString();
+
+    // Create the workflow for the base DN and register the workflow with
+    // the server.
+    WorkflowImpl workflowImpl = new WorkflowImpl(workflowID, baseDN,
+      rootWE.getWorkflowElementID(), (WorkflowElement) rootWE);
+    workflowImpl.register();
+
+    return workflowImpl;
+  }
+
+  /**
+   * Registers a workflow with the default network group.
+   *
+   * @param workflowImpl  The workflow to register with the
+   *                      default network group
+   *
+   * @throws  DirectoryException  If the workflow is already registered with
+   *                              the default network group
+   */
+  private void registerWorkflowWithDefaultNetworkGroup(
+      WorkflowImpl workflowImpl
+      ) throws DirectoryException
+  {
+    NetworkGroup.getDefaultNetworkGroup().registerWorkflow(workflowImpl);
+  }
+
+  /**
+   * Deregisters a workflow with the default network group and
+   * deregisters the workflow with the server. This method is
+   * intended to be called when workflow configuration mode is
+   * auto.
+   *
+   * @param baseDN  the DN of the workflow to deregister
+   */
+  private void deregisterWorkflowWithDefaultNetworkGroup(
+      DN baseDN
+      )
+  {
+    String backendID = this.getBackendID();
+
+    // Get the default network group and deregister all the workflows
+    // being configured for the backend (there is one workflow per
+    // backend base DN).
+    NetworkGroup defaultNetworkGroup = NetworkGroup.getDefaultNetworkGroup();
+    Workflow workflow = defaultNetworkGroup.deregisterWorkflow(baseDN);
+
+    NDBWorkflowElement.remove(backendID);
+
+    // Guard against an NPE: no workflow may be registered for this base DN,
+    // in which case there is nothing further to deregister.
+    if (workflow instanceof WorkflowImpl)
+    {
+      // The workflow ID is "backendID + baseDN".
+      // We cannot use backendID as workflow identifier because a backend
+      // may handle several base DNs. We cannot use baseDN either because
+      // we might want to configure several workflows handling the same
+      // baseDN through different network groups. So a mix of both
+      // backendID and baseDN should be ok.
+      String workflowID = backendID + "#" + baseDN.toString();
+      ((WorkflowImpl) workflow).deregister(workflowID);
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/DatabaseContainer.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/DatabaseContainer.java
new file mode 100644
index 0000000..a051697
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/DatabaseContainer.java
@@ -0,0 +1,83 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import org.opends.server.loggers.debug.DebugTracer;
+
+/**
+ * This class is a wrapper around the NDB database object.
+ */
+public abstract class DatabaseContainer
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+  /**
+   * The entryContainer to which this database belongs.
+   */
+  protected EntryContainer entryContainer;
+
+  /**
+   * The name of the database within the entryContainer.
+   */
+  protected String name;
+
+  /**
+   * Create a new DatabaseContainer object.
+   *
+   * @param name The name of the entry database.
+   * @param entryContainer The entryContainer of the entry database.
+   */
+  protected DatabaseContainer(String name, EntryContainer entryContainer)
+  {
+    this.name = name;
+    this.entryContainer = entryContainer;
+  }
+
+  /**
+   * Get the NDB database name for this database container.
+   *
+   * @return NDB database name for this database container.
+   */
+  public String getName()
+  {
+    return name;
+  }
+
+  /**
+   * Get a string representation of this object.
+   *
+   * @return The database name.
+   */
+  @Override
+  public String toString()
+  {
+    return name;
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/EntryContainer.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/EntryContainer.java
new file mode 100644
index 0000000..8ed7c19
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/EntryContainer.java
@@ -0,0 +1,2205 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+import com.mysql.cluster.ndbj.NdbApiException;
+import com.mysql.cluster.ndbj.NdbApiPermanentException;
+import com.mysql.cluster.ndbj.NdbApiTemporaryException;
+import com.mysql.cluster.ndbj.NdbError;
+import com.mysql.cluster.ndbj.NdbOperation;
+import org.opends.messages.Message;
+
+import org.opends.server.api.Backend;
+import org.opends.server.core.AddOperation;
+import org.opends.server.core.DeleteOperation;
+import org.opends.server.core.ModifyOperation;
+import org.opends.server.core.ModifyDNOperation;
+import org.opends.server.core.SearchOperation;
+import org.opends.server.types.*;
+import org.opends.server.util.ServerConstants;
+
+import java.util.*;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import static org.opends.messages.NdbMessages.*;
+
+import org.opends.messages.MessageBuilder;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import org.opends.server.loggers.debug.DebugTracer;
+import static org.opends.server.util.ServerConstants.*;
+import org.opends.server.admin.server.ConfigurationChangeListener;
+import org.opends.server.admin.std.server.NdbBackendCfg;
+import org.opends.server.backends.ndb.OperationContainer.DN2IDSearchCursor;
+import org.opends.server.backends.ndb.OperationContainer.SearchCursorResult;
+import org.opends.server.config.ConfigException;
+
+/**
+ * Storage container for LDAP entries.  Each base DN of a NDB backend is given
+ * its own entry container.  The entry container is the object that implements
+ * the guts of the backend API methods for LDAP operations.
+ */
+public class EntryContainer
+    implements ConfigurationChangeListener<NdbBackendCfg>
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+  /**
+   * The backend to which this entry entryContainer belongs.
+   */
+  private Backend backend;
+
+  /**
+   * The root container in which this entryContainer belongs.
+   */
+  private RootContainer rootContainer;
+
+  /**
+   * The baseDN this entry container is responsible for.
+   */
+  private DN baseDN;
+
+  /**
+   * The backend configuration.
+   */
+  private NdbBackendCfg config;
+
+  /**
+   * The operation container.
+   */
+  private OperationContainer dn2id;
+
+  /**
+   * Cached values from config so they don't have to be retrieved per operation.
+   */
+  private int deadlockRetryLimit;
+
+  // Maximum number of entries a subtree delete may remove before the
+  // operation fails with ADMIN_LIMIT_EXCEEDED.
+  // NOTE(review): not referenced in this portion of the file — presumably
+  // used by the subtree delete logic; confirm against the rest of the class.
+  private int subtreeDeleteSizeLimit;
+
+  // Number of entries deleted per batch during a subtree delete.
+  // NOTE(review): not referenced in this portion of the file — confirm usage.
+  private int subtreeDeleteBatchSize;
+
+  // Database name prefix, sanitized by the constructor so it contains only
+  // letters, digits and underscores.
+  private String databasePrefix;
+
+  /**
+   * A read write lock to handle schema changes and bulk changes.
+   */
+  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+  // Shared (read) side of the lock, taken by ordinary operations.
+  final Lock sharedLock = lock.readLock();
+  // Exclusive (write) side of the lock, taken by schema/bulk changes.
+  final Lock exclusiveLock = lock.writeLock();
+
+  /**
+   * Creates a new entry container responsible for the given base DN.
+   *
+   * @param baseDN  The baseDN this entry container will be responsible for
+   *                storing on disk.
+   * @param databasePrefix The prefix to use in the database names used by
+   *                       this entry container.
+   * @param backend A reference to the NDB backend that is creating this entry
+   *                container.
+   * @param config The configuration of the NDB backend.
+   * @param rootContainer The root container this entry container is in.
+   * @throws ConfigException if a configuration related error occurs.
+   */
+  public EntryContainer(DN baseDN, String databasePrefix, Backend backend,
+                        NdbBackendCfg config, RootContainer rootContainer)
+      throws ConfigException
+  {
+    this.backend = backend;
+    this.baseDN = baseDN;
+    this.config = config;
+    this.rootContainer = rootContainer;
+
+    // Sanitize the prefix: any character that is not a letter or digit is
+    // replaced with an underscore so the result is a legal database name.
+    StringBuilder sanitized = new StringBuilder(databasePrefix.length());
+    for (char c : databasePrefix.toCharArray())
+    {
+      sanitized.append(Character.isLetterOrDigit(c) ? c : '_');
+    }
+    this.databasePrefix = sanitized.toString();
+
+    this.deadlockRetryLimit = config.getDeadlockRetryLimit();
+
+    // Register for configuration change notifications.
+    config.addNdbChangeListener(this);
+  }
+
+  /**
+   * Opens the entryContainer for reading and writing.
+   *
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws ConfigException if a configuration related error occurs.
+   */
+  public void open()
+      throws NdbApiException, ConfigException
+  {
+    try
+    {
+      dn2id = new OperationContainer(BackendImpl.DN2ID_TABLE, this);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // Release anything acquired so far before propagating the failure.
+      close();
+      throw e;
+    }
+  }
+
+  /**
+   * Closes the entry entryContainer and drops its references.
+   *
+   * @throws NdbApiException If an error occurs in the NDB database.
+   */
+  public void close()
+      throws NdbApiException
+  {
+    // Deregister from configuration changes before dropping the config ref.
+    config.removeNdbChangeListener(this);
+
+    // Drop all references; the container must be reopened before reuse.
+    dn2id = null;
+    config = null;
+    backend = null;
+    rootContainer = null;
+  }
+
+  /**
+   * Retrieves a reference to the root container in which this entry container
+   * exists.
+   *
+   * @return  A reference to the root container in which this entry container
+   *          exists, or {@code null} if the container has been closed.
+   */
+  public RootContainer getRootContainer()
+  {
+    return rootContainer;
+  }
+
+  /**
+   * Get the DN database used by this entry entryContainer. The entryContainer
+   * must have been opened.
+   *
+   * @return The DN database, or {@code null} if the container has not been
+   *         opened or has been closed.
+   */
+  public OperationContainer getDN2ID()
+  {
+    return dn2id;
+  }
+
+  /**
+   * Processes the specified search in this entryContainer.
+   * Matching entries should be provided back to the core server using the
+   * <CODE>SearchOperation.returnEntry</CODE> method.
+   *
+   * @param searchOperation The search operation to be processed.
+   * @throws DirectoryException If a problem occurs while processing
+   *         the search.
+   * @throws CanceledOperationException If operation is canceled
+   *         while in progress.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  public void search(SearchOperation searchOperation)
+       throws CanceledOperationException, DirectoryException,
+       NdbApiException, NDBException
+  {
+    DN baseDN = searchOperation.getBaseDN();
+    SearchScope searchScope = searchOperation.getScope();
+
+    AbstractTransaction txn = new AbstractTransaction(rootContainer);
+
+    // Retry the whole search on temporary database failures (deadlock,
+    // timeout), up to BackendImpl.TXN_RETRY_LIMIT attempts.
+    int txnRetries = 0;
+    boolean completed = false;
+    while (!completed) {
+      try {
+        // Handle base-object search first.
+        if (searchScope == SearchScope.BASE_OBJECT) {
+          // Fetch the base entry.
+          Entry baseEntry = dn2id.get(txn, baseDN,
+            NdbOperation.LockMode.LM_CommittedRead);
+
+          // The base entry must exist for a successful result.
+          if (baseEntry == null) {
+            // Check for referral entries above the base entry.
+            targetEntryReferrals(txn, baseDN, searchScope);
+
+            Message message =
+              ERR_NDB_SEARCH_NO_SUCH_OBJECT.get(baseDN.toString());
+            DN matchedDN = getMatchedDN(txn, baseDN);
+            throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+              message, matchedDN, null);
+          }
+
+          // Unless ManageDsaIT is in effect, a referral base entry is
+          // reported as a referral rather than returned.
+          if (!isManageDsaITOperation(searchOperation)) {
+            checkTargetForReferral(baseEntry, searchOperation.getScope());
+          }
+
+          if (searchOperation.getFilter().matchesEntry(baseEntry)) {
+            searchOperation.returnEntry(baseEntry, null);
+          }
+
+          // NOTE(review): this assignment is dead — the return below exits
+          // before the loop condition is re-evaluated.
+          completed = true;
+          return;
+        }
+
+        // Use the indexes when the filter can be serviced by them;
+        // otherwise fall back to a cursor walk of the subtree.
+        IndexFilter indexFilter = new IndexFilter(txn, this, searchOperation);
+        if (indexFilter.evaluate()) {
+          searchIndexed(searchOperation, indexFilter);
+          completed = true;
+        } else {
+          DN2IDSearchCursor cursor = dn2id.getSearchCursor(txn, baseDN);
+          searchNotIndexed(searchOperation, cursor);
+          completed = true;
+        }
+      } catch (NdbApiTemporaryException databaseException) {
+        if (txnRetries < BackendImpl.TXN_RETRY_LIMIT) {
+          txnRetries++;
+          continue;
+        }
+        throw databaseException;
+      } finally {
+        // NOTE(review): txn is closed on every iteration, including before a
+        // retry, and the same txn object is then reused — this assumes
+        // AbstractTransaction may be reused after close(); confirm against
+        // AbstractTransaction's contract.
+        if (txn != null) {
+          txn.close();
+        }
+      }
+    }
+  }
+
+  /**
+   * Performs a search for which the indexes yielded a set of candidate
+   * entry IDs.  Each candidate is fetched, scope-checked, filtered and
+   * returned to the client, honoring the client's lookthrough limit and
+   * referral handling.
+   *
+   * @param searchOperation The search operation to be processed.
+   * @param indexFilter The evaluated index filter supplying candidate IDs.
+   * @throws CanceledOperationException If operation is canceled
+   *         while in progress.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws DirectoryException If a Directory Server error occurs.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  private void searchIndexed(SearchOperation searchOperation,
+    IndexFilter indexFilter)
+       throws CanceledOperationException, NdbApiException,
+       DirectoryException, NDBException
+  {
+    DN baseDN = searchOperation.getBaseDN();
+    SearchScope searchScope = searchOperation.getScope();
+    boolean manageDsaIT = isManageDsaITOperation(searchOperation);
+
+    AbstractTransaction txn = new AbstractTransaction(rootContainer);
+    try {
+      // Fetch the base entry.
+      Entry baseEntry = dn2id.get(txn, baseDN, NdbOperation.LockMode.LM_Read);
+
+      // The base entry must exist for a successful result.
+      if (baseEntry == null) {
+        // Check for referral entries above the base entry.
+        targetEntryReferrals(txn, baseDN, searchScope);
+
+        Message message = ERR_NDB_SEARCH_NO_SUCH_OBJECT.get(baseDN.toString());
+        DN matchedDN = getMatchedDN(txn, baseDN);
+        throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+          message, matchedDN, null);
+      }
+
+      if (!manageDsaIT) {
+        checkTargetForReferral(baseEntry, searchScope);
+      }
+
+      /*
+       * The base entry is only included for whole subtree search.
+       */
+      if (searchScope == SearchScope.WHOLE_SUBTREE) {
+        if (searchOperation.getFilter().matchesEntry(baseEntry)) {
+          searchOperation.returnEntry(baseEntry, null);
+        }
+      }
+
+      int lookthroughCount = 0;
+      int lookthroughLimit =
+        searchOperation.getClientConnection().getLookthroughLimit();
+
+      indexFilter.scan();
+      try {
+        long eid = indexFilter.getNext();
+
+        while (eid != 0) {
+          if (lookthroughLimit > 0 && lookthroughCount > lookthroughLimit) {
+            //Lookthrough limit exceeded
+            searchOperation.setResultCode(ResultCode.ADMIN_LIMIT_EXCEEDED);
+            searchOperation.appendErrorMessage(
+              NOTE_NDB_LOOKTHROUGH_LIMIT_EXCEEDED.get(lookthroughLimit));
+            return;
+          }
+
+          // Fetch the candidate entry from the database.
+          Entry entry = dn2id.get(txn, eid, NdbOperation.LockMode.LM_Read);
+
+          // FIX: test for a missing (e.g. concurrently removed) candidate
+          // BEFORE dereferencing it.  Previously entry.getDN() was called
+          // ahead of the null check and could throw NullPointerException.
+          if (entry != null) {
+            // We have found a subordinate entry.
+            DN dn = entry.getDN();
+
+            boolean isInScope = true;
+            if (searchScope == SearchScope.SINGLE_LEVEL) {
+              // Check if this entry is an immediate child.
+              if ((dn.getNumComponents() !=
+                baseDN.getNumComponents() + 1)) {
+                isInScope = false;
+              }
+            }
+
+            if (isInScope) {
+              // Process the candidate entry.
+              lookthroughCount++;
+              if (manageDsaIT || !entry.isReferral()) {
+                // Filter the entry.
+                if (searchOperation.getFilter().matchesEntry(entry)) {
+                  if (!searchOperation.returnEntry(entry, null)) {
+                    // We have been told to discontinue processing of the
+                    // search. This could be due to size limit exceeded or
+                    // operation cancelled.
+                    return;
+                  }
+                }
+              } else {
+                // Referral entry: report it back as a search reference.
+                try {
+                  checkTargetForReferral(entry, searchScope);
+                } catch (DirectoryException refException) {
+                  if (refException.getResultCode() == ResultCode.REFERRAL) {
+                    SearchResultReference reference =
+                      new SearchResultReference(
+                      refException.getReferralURLs());
+                    if (!searchOperation.returnReference(dn, reference)) {
+                      // We have been told to discontinue processing of the
+                      // search. This could be due to size limit exceeded or
+                      // operation cancelled.
+                      return;
+                    }
+                  } else {
+                    throw refException;
+                  }
+                }
+              }
+            }
+          }
+
+          searchOperation.checkIfCanceled(false);
+
+          // Move to the next record.
+          eid = indexFilter.getNext();
+        }
+      } finally {
+        indexFilter.close();
+      }
+    } finally {
+      if (txn != null) {
+        txn.close();
+      }
+    }
+  }
+
+  /**
+   * Performs a search for which no usable index candidate set exists: the
+   * subtree under the search base is walked with a DN2ID cursor and each
+   * record is scope-checked, fetched and filtered.
+   *
+   * @param searchOperation The search operation to be processed.
+   * @param cursor The DN2ID search cursor positioned under the base DN.
+   * @throws CanceledOperationException If operation is canceled
+   *         while in progress.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws DirectoryException If a Directory Server error occurs.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  private void searchNotIndexed(SearchOperation searchOperation,
+    DN2IDSearchCursor cursor)
+       throws CanceledOperationException, NdbApiException,
+       DirectoryException, NDBException
+  {
+    DN baseDN = searchOperation.getBaseDN();
+    SearchScope searchScope = searchOperation.getScope();
+    boolean manageDsaIT = isManageDsaITOperation(searchOperation);
+
+    AbstractTransaction txn = new AbstractTransaction(rootContainer);
+    try {
+      // Fetch the base entry.
+      Entry baseEntry = null;
+      baseEntry = dn2id.get(txn, baseDN, NdbOperation.LockMode.LM_Read);
+
+      // The base entry must exist for a successful result.
+      if (baseEntry == null) {
+        // Check for referral entries above the base entry.
+        targetEntryReferrals(txn, baseDN, searchScope);
+
+        Message message = ERR_NDB_SEARCH_NO_SUCH_OBJECT.get(baseDN.toString());
+        DN matchedDN = getMatchedDN(txn, baseDN);
+        throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+          message, matchedDN, null);
+      }
+
+      if (!manageDsaIT) {
+        checkTargetForReferral(baseEntry, searchScope);
+      }
+
+      /*
+       * The base entry is only included for whole subtree search.
+       */
+      if (searchScope == SearchScope.WHOLE_SUBTREE) {
+        if (searchOperation.getFilter().matchesEntry(baseEntry)) {
+          searchOperation.returnEntry(baseEntry, null);
+        }
+      }
+
+      int lookthroughCount = 0;
+      int lookthroughLimit =
+        searchOperation.getClientConnection().getLookthroughLimit();
+
+      cursor.open();
+      try {
+        SearchCursorResult result = cursor.getNext();
+
+        while (result != null) {
+          if (lookthroughLimit > 0 && lookthroughCount > lookthroughLimit) {
+            //Lookthrough limit exceeded
+            searchOperation.setResultCode(ResultCode.ADMIN_LIMIT_EXCEEDED);
+            searchOperation.appendErrorMessage(
+              NOTE_NDB_LOOKTHROUGH_LIMIT_EXCEEDED.get(lookthroughLimit));
+            return;
+          }
+
+          // We have found a subordinate entry.
+          DN dn = DN.decode(result.dn);
+
+          boolean isInScope = true;
+          if (searchScope == SearchScope.SINGLE_LEVEL) {
+            // Check if this entry is an immediate child.
+            if ((dn.getNumComponents() !=
+              baseDN.getNumComponents() + 1)) {
+              isInScope = false;
+            }
+          }
+
+          if (isInScope) {
+            // Fetch the candidate entry from the database.  The entry is
+            // only fetched for in-scope records to avoid needless reads.
+            Entry entry = dn2id.get(txn, dn, NdbOperation.LockMode.LM_Read);
+            // Process the candidate entry; it may have been removed
+            // concurrently, in which case it is skipped.
+            if (entry != null) {
+              lookthroughCount++;
+              if (manageDsaIT || !entry.isReferral()) {
+                // Filter the entry.
+                if (searchOperation.getFilter().matchesEntry(entry)) {
+                  if (!searchOperation.returnEntry(entry, null)) {
+                    // We have been told to discontinue processing of the
+                    // search. This could be due to size limit exceeded or
+                    // operation cancelled.
+                    return;
+                  }
+                }
+              } else {
+                // Referral entry (the check below is always true on this
+                // branch); report it back as a search reference.
+                if (entry.isReferral()) {
+                  try {
+                    checkTargetForReferral(entry, searchScope);
+                  } catch (DirectoryException refException) {
+                    if (refException.getResultCode() == ResultCode.REFERRAL) {
+                      SearchResultReference reference =
+                        new SearchResultReference(
+                        refException.getReferralURLs());
+                      if (!searchOperation.returnReference(dn, reference)) {
+                        // We have been told to discontinue processing of the
+                        // search. This could be due to size limit exceeded or
+                        // operation cancelled.
+                        return;
+                      }
+                    } else {
+                      throw refException;
+                    }
+                  }
+                }
+              }
+            }
+          }
+
+          searchOperation.checkIfCanceled(false);
+
+          // Move to the next record.
+          result = cursor.getNext();
+        }
+      } finally {
+        cursor.close();
+      }
+    } finally {
+      if (txn != null) {
+        txn.close();
+      }
+    }
+  }
+
+  /**
+   * Adds the provided entry to this database and commits the transaction.
+   * This method must ensure that the entry is appropriate for the database
+   * and that no entry already exists with the same DN.  The caller must hold
+   * a write lock on the DN of the provided entry.
+   *
+   * @param entry        The entry to add to this database.
+   * @param addOperation The add operation with which the new entry is
+   *                     associated.  This may be <CODE>null</CODE> for adds
+   *                     performed internally.
+   * @param txn          Abstract transaction for this operation.
+   * @throws DirectoryException If a problem occurs while trying to add the
+   *                            entry.
+   * @throws CanceledOperationException If operation is canceled
+   *         while in progress.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  public void addEntry(Entry entry, AddOperation addOperation,
+    AbstractTransaction txn)
+      throws CanceledOperationException, NdbApiException,
+      DirectoryException, NDBException
+  {
+    // Run the add under the transacted-operation wrapper and commit it.
+    invokeTransactedOperation(txn, new AddEntryTransaction(entry),
+      addOperation, true, false);
+  }
+
+  /**
+   * Adds the provided entry to this database WITHOUT committing the
+   * associated transaction: the insert is executed but the caller remains
+   * responsible for committing (or aborting) {@code txn}.  This method must
+   * ensure that the entry is appropriate for the database and that no entry
+   * already exists with the same DN.  The caller must hold a write lock on
+   * the DN of the provided entry.
+   *
+   * @param entry        The entry to add to this database.
+   * @param addOperation The add operation with which the new entry is
+   *                     associated.  This may be <CODE>null</CODE> for adds
+   *                     performed internally.
+   * @param txn          Abstract transaction for this operation; not
+   *                     committed by this method.
+   * @throws DirectoryException If a problem occurs while trying to add the
+   *                            entry.
+   * @throws CanceledOperationException If operation is canceled
+   *         while in progress.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  public void addEntryNoCommit(Entry entry, AddOperation addOperation,
+    AbstractTransaction txn)
+      throws CanceledOperationException, NdbApiException,
+      DirectoryException, NDBException
+  {
+    // commit == false: the transaction is left open for the caller.
+    TransactedOperation operation = new AddEntryTransaction(entry);
+    invokeTransactedOperation(txn, operation, addOperation, false, false);
+  }
+
+  /**
+   * This method is common to all operations invoked under a database
+   * transaction. It retries the operation if the transaction is
+   * aborted due to a temporary (e.g. deadlock) condition, up to a
+   * configured maximum number of retries.
+   *
+   * @param txn The abstract transaction under which to run the operation.
+   * @param operation An object implementing the TransactedOperation
+   *                  interface.
+   * @param ldapOperation The LDAP operation being processed, or
+   *                      {@code null} for internal operations; used only
+   *                      for cancellation checks.
+   * @param commit Whether to commit the transaction on success and close
+   *               it on completion of each attempt.
+   * @param locked Whether the caller manages its own locking; when
+   *               {@code true}, temporary failures are not retried here.
+   * @throws CanceledOperationException If the operation is canceled.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws DirectoryException If a Directory Server error occurs.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  private void invokeTransactedOperation(AbstractTransaction txn,
+    TransactedOperation operation, Operation ldapOperation,
+    boolean commit, boolean locked)
+      throws CanceledOperationException, NdbApiException,
+      DirectoryException, NDBException
+  {
+    // Attempt the operation under a transaction until it fails or completes.
+    int txnRetries = 0;
+    boolean completed = false;
+
+    while (!completed)
+    {
+      try
+      {
+        // Invoke the operation.
+        operation.invokeOperation(txn);
+
+        // One last check before committing.
+        if (ldapOperation != null) {
+          ldapOperation.checkIfCanceled(true);
+        }
+
+        // Commit the transaction.
+        if (commit) {
+          txn.commit();
+        }
+        completed = true;
+      }
+      catch (NdbApiTemporaryException databaseException)
+      {
+        // Temporary failures (deadlock, timeout) are retried unless the
+        // caller holds its own locks.
+        if (!locked) {
+          if (txnRetries < BackendImpl.TXN_RETRY_LIMIT) {
+            if (txn != null) {
+              txn.close();
+            }
+            txnRetries++;
+            continue;
+          }
+        }
+        if (debugEnabled()) {
+          TRACER.debugCaught(DebugLogLevel.ERROR,
+            databaseException);
+        }
+        throw databaseException;
+      }
+      catch (NdbApiPermanentException databaseException)
+      {
+        // Permanent database errors are never retried.
+        throw databaseException;
+      }
+      catch (DirectoryException directoryException)
+      {
+        throw directoryException;
+      }
+      catch (NDBException ne)
+      {
+        // FIX: renamed the catch variable, which previously shadowed the
+        // NDBException class name itself.  The rethrow clauses above keep
+        // declared exception types out of the catch-all below.
+        throw ne;
+      }
+      catch (Exception e)
+      {
+        // Anything unexpected is wrapped in a backend exception.
+        Message message = ERR_NDB_UNCHECKED_EXCEPTION.get();
+        throw new NDBException(message, e);
+      }
+      finally {
+        if (commit) {
+          if (txn != null) {
+            txn.close();
+          }
+        }
+      }
+    }
+
+    // Do any actions necessary after successful commit,
+    // usually to update the entry cache.
+    // NOTE(review): this also runs when commit == false; confirm intended.
+    operation.postCommitAction();
+  }
+
+  /**
+   * This interface represents any kind of operation on the database
+   * that must be performed under a transaction. A class which implements
+   * this interface does not need to be concerned with creating the
+   * transaction nor retrying the transaction after deadlock.
+   */
+  private interface TransactedOperation
+  {
+    /**
+     * Invoke the operation under the given transaction.
+     * (Interface members are implicitly public and abstract; the redundant
+     * modifiers have been removed.)
+     *
+     * @param txn The transaction to be used to perform the operation.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws CanceledOperationException If the operation is canceled.
+     * @throws NDBException If an error occurs in the NDB backend.
+     */
+    void invokeOperation(AbstractTransaction txn)
+        throws NdbApiException, DirectoryException,
+        CanceledOperationException, NDBException;
+
+    /**
+     * This method is called after the transaction has successfully
+     * committed.
+     */
+    void postCommitAction();
+  }
+
+  /**
+   * This inner class implements the Add Entry operation through
+   * the TransactedOperation interface.
+   */
+  private class AddEntryTransaction implements TransactedOperation
+  {
+    /**
+     * The entry to be added.
+     * (FIX: fields are now private, and final where never reassigned.)
+     */
+    private final Entry entry;
+
+    /**
+     * The DN of the superior entry of the entry to be added.  This can be
+     * null if the entry to be added is a base entry.
+     */
+    private final DN parentDN;
+
+    /**
+     * The ID of the entry once it has been assigned; 0 until assigned.
+     */
+    private long entryID;
+
+    /**
+     * Create a new Add Entry NdbTransaction.
+     * @param entry The entry to be added.
+     */
+    public AddEntryTransaction(Entry entry)
+    {
+      this.entry = entry;
+      this.parentDN = getParentWithinBase(entry.getDN());
+    }
+
+    /**
+     * Invoke the operation under the given transaction.
+     *
+     * @param txn The transaction to be used to perform the operation.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NDBException If an error occurs in the NDB backend.
+     */
+    public void invokeOperation(AbstractTransaction txn)
+        throws NdbApiException, DirectoryException, NDBException
+    {
+      // Check that the parent entry exists.
+      if (parentDN != null) {
+        // Check for referral entries above the target.
+        targetEntryReferrals(txn, entry.getDN(), null);
+        long parentID = dn2id.getID(txn, parentDN,
+          NdbOperation.LockMode.LM_Read);
+        if (parentID == 0) {
+          Message message = ERR_NDB_ADD_NO_SUCH_OBJECT.get(
+                  entry.getDN().toString());
+          DN matchedDN = getMatchedDN(txn, baseDN);
+          throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+              message, matchedDN, null);
+        }
+      }
+
+      // First time through, assign the next entryID.  A retry after a
+      // temporary failure keeps the previously assigned ID.
+      if (entryID == 0)
+      {
+        entryID = rootContainer.getNextEntryID(txn.getNdb());
+      }
+
+      // Insert; a constraint violation means an entry with this DN
+      // already exists.
+      try {
+        dn2id.insert(txn, entry.getDN(), entryID, entry);
+        txn.execute();
+      } catch (NdbApiException ne) {
+        if (ne.getErrorObj().getClassification() ==
+          NdbError.Classification.ConstraintViolation)
+        {
+          Message message =
+            ERR_NDB_ADD_ENTRY_ALREADY_EXISTS.get(entry.getDN().toString());
+          throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
+            message);
+        } else {
+          throw ne;
+        }
+      }
+    }
+
+    /**
+     * This method is called after the transaction has successfully
+     * committed.  No post-commit action is required for adds.
+     */
+    public void postCommitAction()
+    {
+      // Nothing to do.
+    }
+  }
+
+  /**
+   * Removes the specified entry from this database.  This method must ensure
+   * that the entry exists and that it does not have any subordinate entries
+   * (unless the database supports a subtree delete operation and the client
+   * included the appropriate information in the request).
+   * The caller must hold a write lock on the provided entry DN.
+   *
+   * @param entryDN         The DN of the entry to remove from this database.
+   * @param entry           The entry to delete.
+   * @param deleteOperation The delete operation with which this action is
+   *                        associated.  This may be <CODE>null</CODE> for
+   *                        deletes performed internally.
+   * @param txn             Abstract transaction for this operation.
+   * @throws DirectoryException If a problem occurs while trying to remove the
+   *                            entry.
+   * @throws CanceledOperationException If operation is canceled
+   *         while in progress.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  public void deleteEntry(DN entryDN, Entry entry,
+    DeleteOperation deleteOperation, AbstractTransaction txn)
+    throws CanceledOperationException, DirectoryException,
+    NdbApiException, NDBException
+  {
+    DeleteEntryTransaction operation =
+        new DeleteEntryTransaction(entryDN, entry, deleteOperation);
+
+    boolean isComplete = false;
+
+    while(!isComplete)
+    {
+      // locked == true: the caller holds the necessary locks, so temporary
+      // failures are not retried inside the wrapper.
+      invokeTransactedOperation(txn, operation, deleteOperation, true, true);
+
+      if (operation.adminSizeLimitExceeded())
+      {
+        Message message = NOTE_NDB_SUBTREE_DELETE_SIZE_LIMIT_EXCEEDED.get(
+                operation.getDeletedEntryCount());
+        throw new DirectoryException(
+          ResultCode.ADMIN_LIMIT_EXCEEDED,
+          message);
+      }
+      if(operation.batchSizeExceeded())
+      {
+        // More entries remain in the subtree: start another batch.
+        operation.resetBatchSize();
+        continue;
+      }
+      isComplete = true;
+      // FIX: guard against null — the javadoc allows deleteOperation to be
+      // null for internal deletes, but it was previously dereferenced
+      // unconditionally, causing a NullPointerException.
+      if (deleteOperation != null)
+      {
+        Message message =
+            NOTE_NDB_DELETED_ENTRY_COUNT.get(operation.getDeletedEntryCount());
+        MessageBuilder errorMessage = new MessageBuilder();
+        errorMessage.append(message);
+        deleteOperation.setErrorMessage(errorMessage);
+      }
+    }
+  }
+
+  /**
+   * Removes the specified entry from this database WITHOUT committing the
+   * associated transaction: the caller remains responsible for committing
+   * (or aborting) {@code txn}.  This method must ensure that the entry
+   * exists and that it does not have any subordinate entries (unless the
+   * database supports a subtree delete operation and the client included
+   * the appropriate information in the request).
+   * The caller must hold a write lock on the provided entry DN.
+   *
+   * @param entryDN         The DN of the entry to remove from this database.
+   * @param entry           The entry to delete.
+   * @param deleteOperation The delete operation with which this action is
+   *                        associated.  This may be <CODE>null</CODE> for
+   *                        deletes performed internally.
+   * @param txn             Abstract transaction for this operation; not
+   *                        committed by this method.
+   * @throws DirectoryException If a problem occurs while trying to remove the
+   *                            entry.
+   * @throws CanceledOperationException If operation is canceled
+   *         while in progress.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  public void deleteEntryNoCommit(DN entryDN, Entry entry,
+    DeleteOperation deleteOperation, AbstractTransaction txn)
+    throws CanceledOperationException, DirectoryException,
+    NdbApiException, NDBException
+  {
+    DeleteEntryTransaction operation =
+        new DeleteEntryTransaction(entryDN, entry, deleteOperation);
+
+    boolean isComplete = false;
+
+    while(!isComplete)
+    {
+      // commit == false: the transaction is left open for the caller.
+      invokeTransactedOperation(txn, operation, deleteOperation, false, true);
+
+      if (operation.adminSizeLimitExceeded())
+      {
+        Message message = NOTE_NDB_SUBTREE_DELETE_SIZE_LIMIT_EXCEEDED.get(
+                operation.getDeletedEntryCount());
+        throw new DirectoryException(
+          ResultCode.ADMIN_LIMIT_EXCEEDED,
+          message);
+      }
+      if(operation.batchSizeExceeded())
+      {
+        // More entries remain in the subtree: start another batch.
+        operation.resetBatchSize();
+        continue;
+      }
+      isComplete = true;
+      // FIX: guard against null — the javadoc allows deleteOperation to be
+      // null for internal deletes, but it was previously dereferenced
+      // unconditionally, causing a NullPointerException.
+      if (deleteOperation != null)
+      {
+        Message message =
+            NOTE_NDB_DELETED_ENTRY_COUNT.get(operation.getDeletedEntryCount());
+        MessageBuilder errorMessage = new MessageBuilder();
+        errorMessage.append(message);
+        deleteOperation.setErrorMessage(errorMessage);
+      }
+    }
+  }
+
+  /**
+   * Delete a leaf entry.
+   * The caller must be sure that the entry is indeed a leaf.
+   *
+   * @param txn       The abstract transaction.
+   * @param leafDN    The DN of the leaf entry to be deleted.
+   * @param leafID    The ID of the leaf entry.
+   * @param operation The delete operation with which this action is
+   *                  associated, or <CODE>null</CODE> for deletes performed
+   *                  internally.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws DirectoryException If a Directory Server error occurs.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  private void deleteLeaf(AbstractTransaction txn,
+                          DN leafDN,
+                          long leafID,
+                          DeleteOperation operation)
+      throws NdbApiException, DirectoryException, NDBException
+  {
+    // Fetch the entry under an exclusive lock so that no concurrent change
+    // can slip in between this read and the removal below.
+    Entry entry = dn2id.get(txn, leafDN, NdbOperation.LockMode.LM_Exclusive);
+
+    // Check that the entry exists.
+    if (entry == null)
+    {
+      Message msg = ERR_NDB_MISSING_ID2ENTRY_RECORD.get(Long.toString(leafID));
+      throw new NDBException(msg);
+    }
+
+    // Reject deletes of referral entries unless ManageDsaIT was requested.
+    if (!isManageDsaITOperation(operation))
+    {
+      checkTargetForReferral(entry, null);
+    }
+
+    // Remove from dn2id.  A failed removal means the dn2id record vanished
+    // after the get above; report it as a missing dn2id record (consistent
+    // with deleteTarget) rather than a missing id2entry record.
+    if (!dn2id.remove(txn, entry))
+    {
+      Message msg = ERR_NDB_MISSING_DN2ID_RECORD.get(leafDN.toString());
+      throw new NDBException(msg);
+    }
+  }
+
+  /**
+   * Delete the target entry of a delete operation, handling referral
+   * entries appropriately.  The caller must be sure that the entry is
+   * indeed a leaf.
+   *
+   * @param txn    The abstract transaction.
+   * @param leafDN The DN of the target entry to be deleted.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws DirectoryException If a Directory Server error occurs.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  private void deleteTarget(AbstractTransaction txn,
+                            DN leafDN, Entry entry,
+                            DeleteOperation operation)
+      throws NdbApiException, DirectoryException, NDBException
+  {
+    // A null entry means the target does not exist.
+    if (entry == null)
+    {
+      Message noSuchObject = ERR_NDB_DELETE_NO_SUCH_OBJECT.get(
+          leafDN.toString());
+      DN matchedDN = getMatchedDN(txn, baseDN);
+      throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+          noSuchObject, matchedDN, null);
+    }
+
+    // Referral entries may only be deleted with the ManageDsaIT control.
+    if (!isManageDsaITOperation(operation))
+    {
+      checkTargetForReferral(entry, null);
+    }
+
+    // Remove the entry from dn2id; failure indicates a missing record.
+    if (!dn2id.remove(txn, entry))
+    {
+      throw new NDBException(
+          ERR_NDB_MISSING_DN2ID_RECORD.get(leafDN.toString()));
+    }
+  }
+
+  /**
+   * This inner class implements the Delete Entry operation through
+   * the TransactedOperation interface.
+   */
+  private class DeleteEntryTransaction implements TransactedOperation
+  {
+    /**
+     * The DN of the entry or subtree to be deleted.
+     */
+    private DN entryDN;
+
+    /**
+     * The entry itself.
+     */
+    private Entry entry;
+
+    /**
+     * The Delete operation.  May be null for internal deletes.
+     */
+    private DeleteOperation deleteOperation;
+
+    /**
+     * A list of the DNs of all entries deleted by this operation in a batch.
+     * The subtree delete control can cause multiple entries to be deleted.
+     */
+    private ArrayList<DN> deletedDNList;
+
+
+    /**
+     * Indicates whether the subtree delete size limit has been exceeded.
+     */
+    private boolean adminSizeLimitExceeded = false;
+
+
+    /**
+     * Indicates whether the subtree delete batch size has been exceeded.
+     */
+    private boolean batchSizeExceeded = false;
+
+
+    /**
+     * Indicates the count of deleted DNs in the Delete Operation.
+     * Unlike deletedDNList, this total is NOT reset between batches.
+     */
+    private int countDeletedDN;
+
+    /**
+     * Create a new Delete Entry NdbTransaction.
+     * @param entryDN The entry or subtree to be deleted.
+     * @param entry The entry to be deleted.
+     * @param deleteOperation The Delete operation.
+     */
+    public DeleteEntryTransaction(DN entryDN, Entry entry,
+      DeleteOperation deleteOperation)
+    {
+      this.entryDN = entryDN;
+      this.entry = entry;
+      this.deleteOperation = deleteOperation;
+      deletedDNList = new ArrayList<DN>();
+    }
+
+    /**
+     * Determine whether the subtree delete size limit has been exceeded.
+     * @return true if the size limit has been exceeded.
+     */
+    public boolean adminSizeLimitExceeded()
+    {
+      return adminSizeLimitExceeded;
+    }
+
+    /**
+     * Determine whether the subtree delete batch size has been exceeded.
+     * @return true if the batch size has been exceeded.
+     */
+    public boolean batchSizeExceeded()
+    {
+      return batchSizeExceeded;
+    }
+
+    /**
+     * Resets the batchSizeExceeded parameter to reuse the object
+     * for multiple batches.  The cumulative deleted-entry count is
+     * intentionally preserved across batches.
+     */
+    public void resetBatchSize()
+    {
+      batchSizeExceeded=false;
+      deletedDNList.clear();
+    }
+
+    /**
+     * Get the number of entries deleted during the operation.
+     * @return The number of entries deleted.
+     */
+    public int getDeletedEntryCount()
+    {
+      return countDeletedDN;
+    }
+
+    /**
+     * Invoke the operation under the given transaction.
+     *
+     * @param txn The transaction to be used to perform the operation.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NDBException If an error occurs in the NDB backend.
+     */
+    public void invokeOperation(AbstractTransaction txn)
+        throws CanceledOperationException, NdbApiException,
+        DirectoryException, NDBException
+    {
+      // Check for referral entries above the target entry.
+      targetEntryReferrals(txn, entryDN, null);
+
+      // Determine whether this is a subtree delete.
+      int adminSizeLimit = subtreeDeleteSizeLimit;
+      int deleteBatchSize = subtreeDeleteBatchSize;
+      boolean isSubtreeDelete = false;
+      List<Control> controls = deleteOperation.getRequestControls();
+      if (controls != null)
+      {
+        for (Control control : controls)
+        {
+          if (control.getOID().equals(OID_SUBTREE_DELETE_CONTROL))
+          {
+            isSubtreeDelete = true;
+          }
+        }
+      }
+
+      if (dn2id.hasSubordinates(txn, entryDN) && !isSubtreeDelete) {
+        // The subtree delete control was not specified and
+        // the target entry is not a leaf.
+        Message message =
+          ERR_NDB_DELETE_NOT_ALLOWED_ON_NONLEAF.get(entryDN.toString());
+        throw new DirectoryException(ResultCode.NOT_ALLOWED_ON_NONLEAF,
+          message);
+      }
+
+      if (isSubtreeDelete) {
+        // Iterate over the subordinates with a separate cursor transaction
+        // so that cursor reads do not interfere with the deleting txn.
+        AbstractTransaction cursorTxn =
+          new AbstractTransaction(rootContainer);
+        DN2IDSearchCursor cursor = dn2id.getSearchCursor(cursorTxn, entryDN);
+        cursor.open();
+        try {
+          SearchCursorResult result = cursor.getNext();
+
+          while (result != null) {
+            // We have found a subordinate entry.
+
+            // Enforce any subtree delete size limit.
+            if (adminSizeLimit > 0 && countDeletedDN >= adminSizeLimit) {
+              adminSizeLimitExceeded = true;
+              break;
+            }
+
+            // Enforce any subtree delete batch size.
+            if (deleteBatchSize > 0 &&
+              deletedDNList.size() >= deleteBatchSize) {
+              batchSizeExceeded = true;
+              break;
+            }
+
+            /*
+             * Delete this entry which by now must be a leaf because
+             * we have been deleting from the bottom of the tree upwards.
+             */
+            long entryID = result.id;
+            DN subordinateDN = DN.decode(result.dn);
+
+            deleteLeaf(txn, subordinateDN, entryID, deleteOperation);
+
+            deletedDNList.add(subordinateDN);
+            countDeletedDN++;
+
+            // Honor cancellation requests between subordinate deletes.
+            if (deleteOperation != null) {
+              deleteOperation.checkIfCanceled(false);
+            }
+
+            result = cursor.getNext();
+          }
+        } finally {
+          cursor.close();
+          cursorTxn.close();
+        }
+      }
+
+      // Finally delete the target entry as it was not included
+      // in the dn2id iteration.
+      if (!adminSizeLimitExceeded && !batchSizeExceeded)
+      {
+        // Enforce any subtree delete size limit.
+        if (adminSizeLimit > 0 && countDeletedDN >= adminSizeLimit)
+        {
+          adminSizeLimitExceeded = true;
+        }
+        else if (deleteBatchSize > 0 &&
+                                      deletedDNList.size() >= deleteBatchSize)
+        {
+          batchSizeExceeded = true;
+        }
+        else
+        {
+          deleteTarget(txn, entryDN, entry, deleteOperation);
+          deletedDNList.add(entryDN);
+          countDeletedDN++;
+        }
+      }
+    }
+
+    /**
+     * This method is called after the transaction has successfully
+     * committed.
+     */
+    public void postCommitAction()
+    {
+      // No implementation required.
+    }
+  }
+
+  /**
+   * Indicates whether an entry with the specified DN exists.
+   *
+   * @param  txn      Abstract transaction for this operation.
+   * @param  entryDN  The DN of the entry for which to determine existence.
+   *
+   * @return  <CODE>true</CODE> if the specified entry exists,
+   *          or <CODE>false</CODE> if it does not.
+   *
+   * @throws  DirectoryException  If a problem occurs while trying to make the
+   *                              determination.
+   * @throws  NdbApiException     An error occurred during a database operation.
+   */
+  public boolean entryExists(AbstractTransaction txn, DN entryDN)
+      throws DirectoryException, NdbApiException
+  {
+    try
+    {
+      // A non-zero ID in dn2id means the entry exists.
+      return dn2id.getID(txn, entryDN,
+          NdbOperation.LockMode.LM_CommittedRead) != 0;
+    }
+    catch (NdbApiException e)
+    {
+      // Best effort: a database error during the lookup is logged and
+      // treated as "entry does not exist".
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      return false;
+    }
+  }
+
+  /**
+   * Fetch an entry by DN retrieves the requested entry.
+   * Note that the caller must hold a read or write lock
+   * on the specified DN.
+   *
+   * @param entryDN  The distinguished name of the entry to retrieve.
+   * @param txn      Abstract transaction for this operation.
+   * @param lockMode Operation lock mode.
+   * @return The requested entry, or <CODE>null</CODE> if the entry does not
+   *         exist.
+   * @throws DirectoryException If a problem occurs while trying to retrieve
+   *                            the entry.
+   * @throws NDBException If an error occurs in the NDB backend.
+   * @throws NdbApiException An error occurred during a database operation.
+   */
+  public Entry getEntryNoCommit(DN entryDN, AbstractTransaction txn,
+    NdbOperation.LockMode lockMode)
+      throws NDBException, NdbApiException, DirectoryException
+  {
+    GetEntryByDNOperation operation =
+      new GetEntryByDNOperation(entryDN, lockMode);
+
+    try
+    {
+      // Fetch the entry without committing the caller's transaction.
+      invokeTransactedOperation(txn, operation, null, false, false);
+    }
+    catch (CanceledOperationException ignored)
+    {
+      // There is no associated LDAP operation, so cancellation cannot occur.
+    }
+
+    return operation.getEntry();
+  }
+
+  /**
+   * Fetch an entry by DN retrieves the requested entry.
+   * Note that the caller must hold a read or write lock
+   * on the specified DN.
+   *
+   * @param entryDN The distinguished name of the entry to retrieve.
+   * @return The requested entry, or <CODE>null</CODE> if the entry does not
+   *         exist.
+   * @throws DirectoryException If a problem occurs while trying to retrieve
+   *                            the entry.
+   * @throws NDBException If an error occurs in the NDB backend.
+   * @throws NdbApiException An error occurred during a database operation.
+   */
+  public Entry getEntry(DN entryDN)
+      throws NDBException, NdbApiException, DirectoryException
+  {
+    GetEntryByDNOperation operation = new GetEntryByDNOperation(entryDN,
+      NdbOperation.LockMode.LM_CommittedRead);
+
+    // This variant runs in its own transaction and commits it.
+    AbstractTransaction txn = new AbstractTransaction(rootContainer);
+
+    try
+    {
+      invokeTransactedOperation(txn, operation, null, true, false);
+    }
+    catch (CanceledOperationException ignored)
+    {
+      // There is no associated LDAP operation, so cancellation cannot occur.
+    }
+
+    return operation.getEntry();
+  }
+
+  /**
+   * This inner class gets an entry by DN through
+   * the TransactedOperation interface.
+   */
+  private class GetEntryByDNOperation implements TransactedOperation
+  {
+    /**
+     * The retrieved entry, or null until invokeOperation has run (and
+     * null afterwards if no such entry exists).
+     */
+    private Entry entry = null;
+
+    /**
+     * The ID of the retrieved entry.
+     * NOTE(review): never assigned by this class, so getEntryID() always
+     * returns 0 — confirm whether callers rely on it.
+     */
+    private long entryID = 0;
+
+    /**
+     * Operation lock mode.
+     */
+    private NdbOperation.LockMode lockMode;
+
+    /**
+     * The DN of the entry to be retrieved.
+     */
+    DN entryDN;
+
+    /**
+     * Create a new transacted operation to retrieve an entry by DN.
+     * @param entryDN The DN of the entry to be retrieved.
+     * @param lockMode The lock mode to use for the retrieval.
+     */
+    public GetEntryByDNOperation(DN entryDN, NdbOperation.LockMode lockMode)
+    {
+      this.entryDN = entryDN;
+      this.lockMode = lockMode;
+    }
+
+    /**
+     * Get the retrieved entry.
+     * @return The retrieved entry.
+     */
+    public Entry getEntry()
+    {
+      return entry;
+    }
+
+    /**
+     * Get the ID of the retrieved entry.
+     * @return The ID of the retrieved entry.
+     */
+    public long getEntryID()
+    {
+      return entryID;
+    }
+
+    /**
+     * Invoke the operation under the given transaction.
+     *
+     * @param txn The transaction to be used to perform the operation
+     * @throws NdbApiException If an error occurs in the NDB database.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NDBException If an error occurs in the NDB backend.
+     */
+    public void invokeOperation(AbstractTransaction txn)
+      throws NdbApiException, DirectoryException, NDBException
+    {
+      entry = dn2id.get(txn, entryDN, lockMode);
+
+      if (entry != null)
+      {
+        return;
+      }
+
+      // The entry was not found; check for referral entries above the
+      // target entry, which may raise a referral exception instead.
+      targetEntryReferrals(txn, entryDN, null);
+    }
+
+    /**
+     * This method is called after the transaction has successfully
+     * committed.
+     */
+    public void postCommitAction()
+    {
+      // No implementation required.
+    }
+  }
+
+  /**
+   * The simplest case of replacing an entry in which the entry DN has
+   * not changed.
+   *
+   * @param oldEntry        The old contents of the entry.
+   * @param newEntry        The new contents of the entry.
+   * @param modifyOperation The modify operation with which this action is
+   *                        associated.  This may be <CODE>null</CODE> for
+   *                        modifications performed internally.
+   * @param txn             Abstract transaction for this operation.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws DirectoryException If a Directory Server error occurs.
+   * @throws CanceledOperationException If operation is canceled
+   *         while in progress.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  public void replaceEntry(Entry oldEntry, Entry newEntry,
+    ModifyOperation modifyOperation, AbstractTransaction txn)
+       throws CanceledOperationException, NdbApiException,
+       DirectoryException, NDBException
+  {
+    // Delegate the work to a transacted operation and commit it.
+    invokeTransactedOperation(txn,
+        new ReplaceEntryTransaction(oldEntry, newEntry, modifyOperation),
+        modifyOperation, true, true);
+  }
+
+  /**
+   * This inner class implements the Replace Entry operation through
+   * the TransactedOperation interface.
+   */
+  private class ReplaceEntryTransaction implements TransactedOperation
+  {
+    /**
+     * The new contents of the entry.
+     */
+    private Entry newEntry;
+
+    /**
+     * The old contents of the entry.
+     */
+    private Entry oldEntry;
+
+    /**
+     * The Modify operation, or null if the replace is not due to a Modify
+     * operation.
+     */
+    private ModifyOperation modifyOperation;
+
+    /**
+     * The ID of the entry that was replaced.
+     */
+    private Long entryID;
+
+    /**
+     * Create a new transacted operation to replace an entry.
+     * @param oldEntry The old contents of the entry.
+     * @param newEntry The new contents of the entry.
+     * @param modifyOperation The Modify operation, or null if the replace is
+     * not due to a Modify operation.
+     */
+    public ReplaceEntryTransaction(Entry oldEntry, Entry newEntry,
+                                   ModifyOperation modifyOperation)
+    {
+      this.oldEntry = oldEntry;
+      this.newEntry = newEntry;
+      this.modifyOperation = modifyOperation;
+    }
+
+    /**
+     * Invoke the operation under the given transaction.
+     *
+     * @param txn The transaction to be used to perform the operation.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NDBException If an error occurs in the NDB backend.
+     */
+    public void invokeOperation(AbstractTransaction txn) throws NdbApiException,
+                                                        DirectoryException,
+                                                        NDBException
+    {
+      DN entryDN = newEntry.getDN();
+      // NOTE(review): the attachment is assumed to be a non-null Long set
+      // when the old entry was fetched; a null attachment would throw NPE
+      // on the unboxing comparison below — confirm the fetch path.
+      entryID = (Long) oldEntry.getAttachment();
+      if (entryID == 0)
+      {
+        // The entry does not exist.
+        Message message =
+                ERR_NDB_MODIFY_NO_SUCH_OBJECT.get(entryDN.toString());
+        DN matchedDN = getMatchedDN(txn, baseDN);
+        throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+            message, matchedDN, null);
+      }
+
+      if (!isManageDsaITOperation(modifyOperation))
+      {
+        // Check if the entry is a referral entry.
+        checkTargetForReferral(oldEntry, null);
+      }
+
+      // Replace the entry content in place, keeping the same entry ID.
+      if (!dn2id.put(txn, entryDN, entryID, newEntry, oldEntry))
+      {
+        // The entry does not exist.
+        Message msg = ERR_NDB_MISSING_ID2ENTRY_RECORD.get(
+          Long.toString(entryID));
+        throw new NDBException(msg);
+      }
+    }
+
+    /**
+     * This method is called after the transaction has successfully
+     * committed.
+     */
+    public void postCommitAction()
+    {
+      // No implementation required.
+    }
+  }
+
+  /**
+   * Moves and/or renames the provided entry in this backend, altering any
+   * subordinate entries as necessary.  This must ensure that an entry already
+   * exists with the provided current DN, and that no entry exists with the
+   * target DN of the provided entry.  The caller must hold write locks on both
+   * the current DN and the new DN for the entry.
+   *
+   * @param currentDN         The current DN of the entry to be replaced.
+   * @param entry             The new content to use for the entry.
+   * @param modifyDNOperation The modify DN operation with which this action
+   *                          is associated.  This may be <CODE>null</CODE>
+   *                          for modify DN operations performed internally.
+   * @param txn               Abstract transaction for this operation.
+   * @throws org.opends.server.types.DirectoryException
+   *          If a problem occurs while trying to perform
+   *          the rename.
+   * @throws org.opends.server.types.CanceledOperationException
+   *          If this backend noticed and reacted
+   *          to a request to cancel or abandon the
+   *          modify DN operation.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws NDBException If an error occurs in the NDB backend.
+   */
+  public void renameEntry(DN currentDN, Entry entry,
+                          ModifyDNOperation modifyDNOperation,
+                          AbstractTransaction txn)
+      throws NdbApiException, NDBException, DirectoryException,
+      CanceledOperationException {
+    // Delegate the work to a transacted operation and commit it.
+    invokeTransactedOperation(txn,
+        new RenameEntryTransaction(currentDN, entry, modifyDNOperation),
+        modifyDNOperation, true, true);
+  }
+
+  /**
+   * This inner class implements the Modify DN operation through
+   * the TransactedOperation interface.
+   */
+  private class RenameEntryTransaction implements TransactedOperation
+  {
+    /**
+     * The DN of the entry to be renamed.
+     */
+    private DN oldApexDN;
+
+    /**
+     * The DN of the superior entry of the entry to be renamed.
+     * This is null if the entry to be renamed is a base entry.
+     */
+    private DN oldSuperiorDN;
+
+    /**
+     * The DN of the new superior entry, which can be the same
+     * as the current superior entry.
+     */
+    private DN newSuperiorDN;
+
+    /**
+     * The new contents of the entry to be renamed.
+     */
+    private Entry newApexEntry;
+
+    /**
+     * The Modify DN operation.
+     */
+    private ModifyDNOperation modifyDNOperation;
+
+    /**
+     * Create a new transacted operation for a Modify DN operation.
+     * @param currentDN The DN of the entry to be renamed.
+     * @param entry The new contents of the entry.
+     * @param modifyDNOperation The Modify DN operation to be performed.
+     */
+    public RenameEntryTransaction(DN currentDN, Entry entry,
+                                  ModifyDNOperation modifyDNOperation)
+    {
+      this.oldApexDN = currentDN;
+      this.oldSuperiorDN = getParentWithinBase(currentDN);
+      this.newSuperiorDN = getParentWithinBase(entry.getDN());
+      this.newApexEntry = entry;
+      this.modifyDNOperation = modifyDNOperation;
+    }
+
+    /**
+     * Invoke the operation under the given transaction.
+     *
+     * @param txn The transaction to be used to perform the operation.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NDBException If an error occurs in the NDB backend.
+     */
+    public void invokeOperation(AbstractTransaction txn)
+      throws NdbApiException, DirectoryException,
+      CanceledOperationException, NDBException
+    {
+      DN requestedNewSuperiorDN = null;
+
+      if (modifyDNOperation != null)
+      {
+        requestedNewSuperiorDN = modifyDNOperation.getNewSuperior();
+      }
+
+      // Check whether the renamed entry already exists.
+      if (dn2id.getID(txn, newApexEntry.getDN(),
+        NdbOperation.LockMode.LM_Exclusive) != 0)
+      {
+        Message message = ERR_NDB_MODIFYDN_ALREADY_EXISTS.get(
+            newApexEntry.getDN().toString());
+        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
+                                     message);
+      }
+
+      Entry oldApexEntry = dn2id.get(txn, oldApexDN,
+        NdbOperation.LockMode.LM_Exclusive);
+      if (oldApexEntry == null)
+      {
+        // Check for referral entries above the target entry.
+        targetEntryReferrals(txn, oldApexDN, null);
+
+        Message message =
+                ERR_NDB_MODIFYDN_NO_SUCH_OBJECT.get(oldApexDN.toString());
+        DN matchedDN = getMatchedDN(txn, baseDN);
+        throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+            message, matchedDN, null);
+      }
+
+      if (!isManageDsaITOperation(modifyDNOperation))
+      {
+        checkTargetForReferral(oldApexEntry, null);
+      }
+
+      long oldApexID = (Long) oldApexEntry.getAttachment();
+      long newApexID = oldApexID;
+
+      if (newSuperiorDN != null)
+      {
+        long newSuperiorID = dn2id.getID(txn, newSuperiorDN,
+          NdbOperation.LockMode.LM_Exclusive);
+        if (newSuperiorID == 0)
+        {
+          Message msg =
+                  ERR_NDB_NEW_SUPERIOR_NO_SUCH_OBJECT.get(
+                          newSuperiorDN.toString());
+          DN matchedDN = getMatchedDN(txn, baseDN);
+          throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+              msg, matchedDN, null);
+        }
+        newApexID = rootContainer.getNextEntryID(txn.getNdb());
+      }
+
+      // Move or rename the apex entry.
+      if (requestedNewSuperiorDN != null)
+      {
+        moveApexEntry(txn, newApexID, oldApexEntry, newApexEntry);
+      }
+      else
+      {
+        long newID = rootContainer.getNextEntryID(txn.getNdb());
+        renameApexEntry(txn, newID, oldApexEntry, newApexEntry);
+      }
+
+      AbstractTransaction cursorTxn =
+          new AbstractTransaction(rootContainer);
+      DN2IDSearchCursor cursor = dn2id.getSearchCursor(cursorTxn, oldApexDN);
+      cursor.open();
+
+      try {
+        SearchCursorResult result = cursor.getNext();
+        // Step forward until we pass the ending value.
+        while (result != null) {
+          // We have found a subordinate entry.
+          long oldID = result.id;
+          String oldDN = result.dn;
+          Entry oldEntry = dn2id.get(txn, DN.decode(oldDN),
+            NdbOperation.LockMode.LM_Exclusive);
+
+          if (!isManageDsaITOperation(modifyDNOperation)) {
+            checkTargetForReferral(oldEntry, null);
+          }
+
+          // Construct the new DN of the entry.
+          DN newDN = modDN(oldEntry.getDN(),
+            oldApexDN.getNumComponents(),
+            newApexEntry.getDN());
+
+          if (requestedNewSuperiorDN != null) {
+            // Assign a new entry ID if we are renumbering.
+            long newID = oldID;
+            if (newApexID != oldApexID) {
+              newID = rootContainer.getNextEntryID(txn.getNdb());
+            }
+
+            // Move this entry.
+            moveSubordinateEntry(txn, newID, oldEntry, newDN);
+          } else {
+            // Rename this entry.
+            renameSubordinateEntry(txn, oldID, oldEntry, newDN);
+          }
+
+          if (modifyDNOperation != null) {
+            modifyDNOperation.checkIfCanceled(false);
+          }
+
+          result = cursor.getNext();
+        }
+      } finally {
+        cursor.close();
+        cursorTxn.close();
+      }
+    }
+
+    /**
+     * Update the database for the target entry of a ModDN operation
+     * specifying a new superior.
+     *
+     * @param txn The abstract transaction to be used for the updates.
+     * @param newID The new ID of the target entry, or the original ID if
+     *              the ID has not changed.
+     * @param oldEntry The original contents of the target entry.
+     * @param newEntry The new contents of the target entry.
+     * @throws NDBException If an error occurs in the NDB backend.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     */
+    private void moveApexEntry(AbstractTransaction txn,
+                               long newID, Entry oldEntry, Entry newEntry)
+        throws NDBException, DirectoryException, NdbApiException
+    {
+      // DN oldDN = oldEntry.getDN();
+      DN newDN = newEntry.getDN();
+
+      // Remove the old DN from dn2id.
+      dn2id.remove(txn, oldEntry);
+
+      // Insert the new DN in dn2id.
+      if (!dn2id.insert(txn, newDN, newID, newEntry))
+      {
+        Message message = ERR_NDB_MODIFYDN_ALREADY_EXISTS.get(newDN.toString());
+        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
+                                     message);
+      }
+    }
+
+    /**
+     * Update the database for the target entry of a Modify DN operation
+     * not specifying a new superior.
+     *
+     * @param txn The abstract transaction to be used for the updates.
+     * @param newID The new ID of the target entry.
+     * @param oldEntry The original contents of the target entry.
+     * @param newEntry The new contents of the target entry.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     * @throws NDBException if an error occurs in the NDB Backend.
+     */
+    private void renameApexEntry(AbstractTransaction txn, long newID,
+                                 Entry oldEntry, Entry newEntry)
+        throws DirectoryException, NdbApiException, NDBException
+    {
+      // DN oldDN = oldEntry.getDN();
+      DN newDN = newEntry.getDN();
+
+      // Remove the old DN from dn2id.
+      dn2id.remove(txn, oldEntry);
+
+      // Insert the new DN in dn2id.
+      if (!dn2id.insert(txn, newDN, newID, newEntry))
+      {
+        Message message = ERR_NDB_MODIFYDN_ALREADY_EXISTS.get(newDN.toString());
+        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
+                                     message);
+      }
+    }
+
+    /**
+     * Update the database for a subordinate entry of the target entry
+     * of a Modify DN operation specifying a new superior.
+     *
+     * @param txn The abstract transaction to be used for the updates.
+     * @param newID The new ID of the subordinate entry, or the original ID if
+     *              the ID has not changed.
+     * @param oldEntry The original contents of the subordinate entry.
+     * @param newDN The new DN of the subordinate entry.
+     * @throws NDBException If an error occurs in the NDB backend.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     */
+    private void moveSubordinateEntry(AbstractTransaction txn,
+                                      long newID,
+                                      Entry oldEntry, DN newDN)
+        throws NDBException, DirectoryException, NdbApiException
+    {
+      // Remove the old DN from dn2id.
+      dn2id.remove(txn, oldEntry);
+
+      // Create a new entry that is a copy of the old entry but with the new DN.
+      Entry newEntry = oldEntry.duplicate(false);
+      newEntry.setDN(newDN);
+
+      // Put the new DN in dn2id.
+      if (!dn2id.insert(txn, newDN, newID, newEntry))
+      {
+        Message message = ERR_NDB_MODIFYDN_ALREADY_EXISTS.get(newDN.toString());
+        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
+                                     message);
+      }
+    }
+
+    /**
+     * Update the database for a subordinate entry of the target entry
+     * of a Modify DN operation that does not specify a new superior.
+     *
+     * @param txn The abstract transaction to be used for the updates.
+     * @param entryID The ID of the subordinate entry.
+     * @param oldEntry The original contents of the subordinate entry.
+     * @param newDN The new DN of the subordinate entry.
+     * @throws DirectoryException If a Directory Server error occurs.
+     * @throws NDBException If an error occurs in the NDB backend.
+     * @throws NdbApiException If an error occurs in the NDB database.
+     */
+    private void renameSubordinateEntry(AbstractTransaction txn, long entryID,
+                                        Entry oldEntry, DN newDN)
+        throws DirectoryException, NDBException, NdbApiException
+    {
+      // Drop the old DN mapping before registering the renamed one.
+      dn2id.remove(txn, oldEntry);
+
+      // Shallow-copy the entry and stamp it with its new DN.
+      Entry renamedEntry = oldEntry.duplicate(false);
+      renamedEntry.setDN(newDN);
+
+      // A failed insert means another entry already owns the new DN.
+      if (!dn2id.insert(txn, newDN, entryID, renamedEntry))
+      {
+        throw new DirectoryException(ResultCode.ENTRY_ALREADY_EXISTS,
+            ERR_NDB_MODIFYDN_ALREADY_EXISTS.get(newDN.toString()));
+      }
+    }
+
+    /**
+     * Invoked once the underlying transaction has successfully committed.
+     * This implementation is intentionally a no-op: no post-commit work
+     * is required here.
+     */
+    public void postCommitAction()
+    {
+      // No implementation needed.
+    }
+  }
+
+  /**
+   * Make a new DN for a subordinate entry of a renamed or moved entry.
+   *
+   * @param oldDN The current DN of the subordinate entry.
+   * @param oldSuffixLen The current DN length of the renamed or moved entry.
+   * @param newSuffixDN The new DN of the renamed or moved entry.
+   * @return The new DN of the subordinate entry.
+   */
+  public static DN modDN(DN oldDN, int oldSuffixLen, DN newSuffixDN)
+  {
+    // Keep the RDN components below the renamed entry, then append the
+    // components of the new suffix.
+    int keepCount   = oldDN.getNumComponents() - oldSuffixLen;
+    int suffixCount = newSuffixDN.getNumComponents();
+
+    RDN[] components = new RDN[keepCount + suffixCount];
+    for (int i = 0; i < components.length; i++)
+    {
+      components[i] = (i < keepCount)
+          ? oldDN.getRDN(i)
+          : newSuffixDN.getRDN(i - keepCount);
+    }
+
+    return new DN(components);
+  }
+
+  /**
+   * Get a count of the number of entries stored in this entry entryContainer.
+   *
+   * @return The number of entries stored in this entry entryContainer.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   */
+  public long getEntryCount() throws NdbApiException
+  {
+    // The dn2id record count serves as the entry count.
+    return dn2id.getRecordCount();
+  }
+
+  /**
+   * Get the number of values for which the entry limit has been exceeded
+   * since the entry entryContainer was opened.
+   *
+   * @return The number of values for which the entry limit has been
+   *         exceeded.  This backend keeps no entry-limit statistics, so
+   *         the value is always zero.
+   */
+  public int getEntryLimitExceededCount()
+  {
+    // No entry-limit tracking is implemented for the NDB backend.
+    return 0;
+  }
+
+  /**
+   * Get a list of the databases opened by this entryContainer.
+   *
+   * @param dbList A list to which this container's open database
+   *               containers are added.
+   */
+  public void listDatabases(List<DatabaseContainer> dbList)
+  {
+    // dn2id is the only database container maintained here.
+    dbList.add(dn2id);
+  }
+
+  /**
+   * Determine whether the provided operation has the ManageDsaIT request
+   * control.
+   *
+   * @param operation The operation for which the determination is to be made.
+   * @return true if the operation has the ManageDsaIT request control, or
+   *         false if not.
+   */
+  public static boolean isManageDsaITOperation(Operation operation)
+  {
+    if (operation == null)
+    {
+      return false;
+    }
+
+    List<Control> controls = operation.getRequestControls();
+    if (controls == null)
+    {
+      return false;
+    }
+
+    for (Control control : controls)
+    {
+      if (control.getOID().equals(ServerConstants.OID_MANAGE_DSAIT_CONTROL))
+      {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Get the database prefix (container name) used by this entry container.
+   * The prefix is derived from the base DN when the container is set up;
+   * per that naming scheme only alphanumeric characters are preserved and
+   * all other characters are replaced with an underscore.
+   *
+   * @return The container name for the base DN.
+   */
+  public String getDatabasePrefix()
+  {
+    return databasePrefix;
+  }
+
+  /**
+   * Get the base DN this entry container is responsible for.
+   *
+   * @return The base DN for this entry container.
+   */
+  public DN getBaseDN()
+  {
+    return baseDN;
+  }
+
+  /**
+   * Get the parent of a DN in the scope of the base DN.
+   *
+   * @param dn A DN which is in the scope of the base DN.
+   * @return The parent DN, or null if the given DN is the base DN.
+   */
+  public DN getParentWithinBase(DN dn)
+  {
+    // The base DN is the root of this container and so has no parent here.
+    return dn.equals(baseDN) ? null : dn.getParent();
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public synchronized boolean isConfigurationChangeAcceptable(
+      NdbBackendCfg cfg, List<Message> unacceptableReasons)
+  {
+    // Always acceptable: the configuration attributes used by the entry
+    // container are expected to be validated by the admin framework.
+    return true;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public synchronized ConfigChangeResult applyConfigurationChange(
+      NdbBackendCfg cfg)
+  {
+    // Adopt the new configuration and refresh the cached retry limit.
+    this.config = cfg;
+    this.deadlockRetryLimit = cfg.getDeadlockRetryLimit();
+
+    // No administrative action is required and no messages are produced.
+    return new ConfigChangeResult(ResultCode.SUCCESS, false,
+                                  new ArrayList<Message>());
+  }
+
+  /**
+   * Checks whether the target of an operation is a referral entry and throws
+   * a Directory referral exception if it is.
+   *
+   * @param entry The target entry of the operation, or the base entry of a
+   * search operation.
+   * @param searchScope The scope of the search operation, or null if the
+   * operation is not a search operation.
+   * @throws DirectoryException If a referral is found at or above the target
+   * DN.  The referral URLs will be set appropriately for the references found
+   * in the referral entry.
+   */
+  public void checkTargetForReferral(Entry entry, SearchScope searchScope)
+       throws DirectoryException
+  {
+    Set<String> referralURLs = entry.getReferralURLs();
+    if (referralURLs == null)
+    {
+      // Not a referral entry: nothing to do.
+      return;
+    }
+    throwReferralException(entry.getDN(), entry.getDN(), referralURLs,
+                           searchScope);
+  }
+
+  /**
+   * Throws a Directory referral exception for the case where a referral entry
+   * exists at or above the target DN of an operation.  Each labeled URI from
+   * the referral entry is rewritten (for LDAP URLs) so its base DN and scope
+   * reflect the original operation, then all URIs are packed into the
+   * exception's referral list.
+   *
+   * @param targetDN The target DN of the operation, or the base object of a
+   * search operation.
+   * @param referralDN The DN of the referral entry.
+   * @param labeledURIs The set of labeled URIs in the referral entry.
+   * @param searchScope The scope of the search operation, or null if the
+   * operation is not a search operation.
+   * @throws DirectoryException If a referral is found at or above the target
+   * DN.  The referral URLs will be set appropriately for the references found
+   * in the referral entry.
+   */
+  public void throwReferralException(DN targetDN, DN referralDN,
+                                     Set<String> labeledURIs,
+                                     SearchScope searchScope)
+       throws DirectoryException
+  {
+    ArrayList<String> URIList = new ArrayList<String>(labeledURIs.size());
+    for (String labeledURI : labeledURIs)
+    {
+      // Remove the label part of the labeled URI if there is a label.
+      String uri = labeledURI;
+      int i = labeledURI.indexOf(' ');
+      if (i != -1)
+      {
+        uri = labeledURI.substring(0, i);
+      }
+
+      try
+      {
+        LDAPURL ldapurl = LDAPURL.decode(uri, false);
+
+        if (ldapurl.getScheme().equalsIgnoreCase("ldap"))
+        {
+          // Rebase the URL's DN: when the URL names a different base than
+          // the referral entry, graft the target's components below the
+          // referral onto the URL's base DN (see EntryContainer.modDN).
+          DN urlBaseDN = targetDN;
+          if (!referralDN.equals(ldapurl.getBaseDN()))
+          {
+            urlBaseDN =
+                 EntryContainer.modDN(targetDN,
+                                      referralDN.getNumComponents(),
+                                      ldapurl.getBaseDN());
+          }
+          ldapurl.setBaseDN(urlBaseDN);
+          if (searchScope == null)
+          {
+            // RFC 3296, 5.2.  Target Object Considerations:
+            // In cases where the URI to be returned is a LDAP URL, the server
+            // SHOULD trim any present scope, filter, or attribute list from the
+            // URI before returning it.  Critical extensions MUST NOT be trimmed
+            // or modified.
+            StringBuilder builder = new StringBuilder(uri.length());
+            ldapurl.toString(builder, true);
+            uri = builder.toString();
+          }
+          else
+          {
+            // RFC 3296, 5.3.  Base Object Considerations:
+            // In cases where the URI to be returned is a LDAP URL, the server
+            // MUST provide an explicit scope specifier from the LDAP URL prior
+            // to returning it.
+            ldapurl.getAttributes().clear();
+            ldapurl.setScope(searchScope);
+            ldapurl.setFilter(null);
+            uri = ldapurl.toString();
+          }
+        }
+      }
+      catch (DirectoryException e)
+      {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+        // Return the non-LDAP URI as is.
+      }
+
+      URIList.add(uri);
+    }
+
+    // Throw a directory referral exception containing the URIs.
+    Message msg =
+        NOTE_NDB_REFERRAL_RESULT_MESSAGE.get(String.valueOf(referralDN));
+    throw new DirectoryException(
+            ResultCode.REFERRAL, msg, referralDN, URIList, null);
+  }
+
+  /**
+   * Process referral entries that are above the target DN of an operation.
+   *
+   * @param txn      Abstract transaction for this operation.
+   * @param targetDN The target DN of the operation, or the base object of a
+   * search operation.
+   * @param searchScope The scope of the search operation, or null if the
+   * operation is not a search operation.
+   * @throws DirectoryException If a referral is found at or above the target
+   * DN.  The referral URLs will be set appropriately for the references found
+   * in the referral entry.
+   * @throws NdbApiException An error occurred during a database operation.
+   */
+  public void targetEntryReferrals(AbstractTransaction txn,
+    DN targetDN, SearchScope searchScope)
+       throws DirectoryException, NdbApiException
+  {
+    try {
+      // Go up through the DIT hierarchy until we find a referral.
+      for (DN dn = getParentWithinBase(targetDN); dn != null;
+        dn = getParentWithinBase(dn)) {
+        // Construct a set of all the labeled URIs in the referral.
+        long id = dn2id.getID(txn, dn, NdbOperation.LockMode.LM_Read);
+        Set<String> labeledURIs = dn2id.getReferrals(txn, id);
+        if (!labeledURIs.isEmpty()) {
+          // Found a referral: surface it to the client as a referral result.
+          throwReferralException(targetDN, dn, labeledURIs, searchScope);
+        }
+      }
+    }
+    catch (NdbApiException e)
+    {
+      // NOTE(review): database errors during the referral walk are logged
+      // and swallowed here, so the declared NdbApiException is never
+      // actually propagated from this method.  Confirm this best-effort
+      // behavior is intentional.
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+    }
+  }
+
+
+  /**
+   * Finds an existing entry whose DN is the closest ancestor of a given
+   * baseDN.
+   *
+   * @param txn    Abstract transaction for this operation.
+   * @param baseDN the DN for which we are searching a matched DN
+   * @return the DN of the closest existing ancestor of the baseDN, or null
+   *         if no such ancestor was found within this container's base DN
+   * @throws DirectoryException If an error prevented the check of an
+   * existing entry from being performed
+   * @throws NdbApiException If an error occurs in the NDB database.
+   */
+  private DN getMatchedDN(AbstractTransaction txn, DN baseDN)
+    throws DirectoryException, NdbApiException
+  {
+    // Walk up the ancestor chain and stop at the first DN that exists.
+    for (DN parentDN = baseDN.getParentDNInSuffix();
+         (parentDN != null) && parentDN.isDescendantOf(getBaseDN());
+         parentDN = parentDN.getParentDNInSuffix())
+    {
+      if (entryExists(txn, parentDN))
+      {
+        return parentDN;
+      }
+    }
+    return null;
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/ExportJob.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/ExportJob.java
new file mode 100644
index 0000000..1316b3b
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/ExportJob.java
@@ -0,0 +1,293 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+import com.mysql.cluster.ndbj.NdbApiException;
+import com.mysql.cluster.ndbj.NdbOperation;
+import org.opends.messages.Message;
+
+import org.opends.server.types.DN;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.Entry;
+import org.opends.server.types.LDIFExportConfig;
+import org.opends.server.util.LDIFException;
+
+import java.io.IOException;
+import java.util.*;
+
+import org.opends.server.backends.ndb.OperationContainer.DN2IDSearchCursor;
+import org.opends.server.backends.ndb.OperationContainer.SearchCursorResult;
+import org.opends.server.types.DebugLogLevel;
+import static org.opends.server.loggers.ErrorLogger.logError;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import org.opends.server.loggers.debug.DebugTracer;
+import static org.opends.messages.NdbMessages.*;
+
+/**
+ * Export a NDB backend to LDIF.
+ */
+public class ExportJob
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+  /**
+   * The requested LDIF export configuration.
+   */
+  private final LDIFExportConfig exportConfig;
+
+  /**
+   * The number of milliseconds between job progress reports.
+   */
+  private long progressInterval = 10000;
+
+  /**
+   * The current number of entries exported.
+   */
+  private long exportedCount = 0;
+
+  /**
+   * The current number of entries skipped.
+   */
+  private long skippedCount = 0;
+
+  /**
+   * Create a new export job.
+   *
+   * @param exportConfig The requested LDIF export configuration.
+   */
+  public ExportJob(LDIFExportConfig exportConfig)
+  {
+    this.exportConfig = exportConfig;
+  }
+
+  /**
+   * Export entries from the backend to an LDIF file.
+   *
+   * @param rootContainer The root container to export.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws IOException If an I/O error occurs while writing an entry.
+   * @throws LDIFException If an error occurs while trying to determine whether
+   * to write an entry.
+   */
+  public void exportLDIF(RootContainer rootContainer)
+       throws IOException, LDIFException, NdbApiException
+  {
+    List<DN> includeBranches = exportConfig.getIncludeBranches();
+    ArrayList<EntryContainer> exportContainers =
+        new ArrayList<EntryContainer>();
+
+    // Select the containers covered by the include branches (all of them
+    // when no include branches were requested).
+    for (EntryContainer entryContainer : rootContainer.getEntryContainers())
+    {
+      DN baseDN = entryContainer.getBaseDN();
+
+      if (includeBranches == null || includeBranches.isEmpty())
+      {
+        exportContainers.add(entryContainer);
+      }
+      else
+      {
+        for (DN includeBranch : includeBranches)
+        {
+          if (includeBranch.isDescendantOf(baseDN) ||
+               includeBranch.isAncestorOf(baseDN))
+          {
+            // Add each matching container exactly once, even when several
+            // include branches select it.
+            exportContainers.add(entryContainer);
+            break;
+          }
+        }
+      }
+    }
+
+    // Make a note of the time we started.
+    long startTime = System.currentTimeMillis();
+
+    // Start a timer for the progress report.
+    Timer timer = new Timer();
+    TimerTask progressTask = new ProgressTask();
+    timer.scheduleAtFixedRate(progressTask, progressInterval,
+                              progressInterval);
+
+    // Iterate through the containers.
+    try
+    {
+      for (EntryContainer exportContainer : exportContainers)
+      {
+        if (exportConfig.isCancelled())
+        {
+          break;
+        }
+
+        // Hold the shared lock so the container cannot be closed while it
+        // is being exported.
+        exportContainer.sharedLock.lock();
+        try
+        {
+          exportContainer(exportContainer);
+        }
+        finally
+        {
+          exportContainer.sharedLock.unlock();
+        }
+      }
+    }
+    finally
+    {
+      timer.cancel();
+    }
+
+    long finishTime = System.currentTimeMillis();
+    long totalTime = (finishTime - startTime);
+
+    float rate = 0;
+    if (totalTime > 0)
+    {
+      rate = 1000f*exportedCount / totalTime;
+    }
+
+    Message message = NOTE_NDB_EXPORT_FINAL_STATUS.get(
+        exportedCount, skippedCount, totalTime/1000, rate);
+    logError(message);
+  }
+
+  /**
+   * Export the entries in a single entry entryContainer, in other words from
+   * one of the base DNs.
+   *
+   * @param entryContainer The entry container that holds the entries to be
+   *                       exported.
+   * @throws NdbApiException If an error occurs in the NDB database.
+   * @throws IOException If an error occurs while writing an entry.
+   * @throws  LDIFException  If an error occurs while trying to determine
+   *                         whether to write an entry.
+   */
+  private void exportContainer(EntryContainer entryContainer)
+       throws IOException, LDIFException, NdbApiException
+  {
+    OperationContainer dn2id = entryContainer.getDN2ID();
+    RootContainer rc = entryContainer.getRootContainer();
+    DN baseDN = DN.NULL_DN;
+
+    AbstractTransaction txn = new AbstractTransaction(rc);
+
+    DN2IDSearchCursor cursor = dn2id.getSearchCursor(txn, baseDN);
+    cursor.open();
+
+    try {
+      SearchCursorResult result = cursor.getNext();
+      while (result != null) {
+        if (exportConfig.isCancelled()) {
+          break;
+        }
+        DN dn = null;
+        try {
+          dn = DN.decode(result.dn);
+        } catch (DirectoryException ex) {
+          if (debugEnabled()) {
+            TRACER.debugCaught(DebugLogLevel.ERROR, ex);
+          }
+          skippedCount++;
+          // Advance the cursor before continuing: without this step the
+          // loop would retry the same undecodable record forever.
+          result = cursor.getNext();
+          continue;
+        }
+        // Each entry is fetched under its own transaction, closed as soon
+        // as the read completes.
+        Entry entry = null;
+        AbstractTransaction leafTxn = new AbstractTransaction(rc);
+        try {
+          entry = dn2id.get(leafTxn, dn,
+            NdbOperation.LockMode.LM_CommittedRead);
+        } finally {
+          leafTxn.close();
+        }
+        if ((entry != null) && entry.toLDIF(exportConfig)) {
+          exportedCount++;
+        } else {
+          skippedCount++;
+        }
+        // Move to the next record.
+        result = cursor.getNext();
+      }
+    } finally {
+      cursor.close();
+      txn.close();
+    }
+  }
+
+  /**
+   * This class reports progress of the export job at fixed intervals.
+   */
+  class ProgressTask extends TimerTask
+  {
+    /**
+     * The number of entries that had been exported at the time of the
+     * previous progress report.
+     */
+    private long previousCount = 0;
+
+    /**
+     * The time in milliseconds of the previous progress report.
+     */
+    private long previousTime;
+
+    /**
+     * Create a new export progress task.
+     */
+    public ProgressTask()
+    {
+      previousTime = System.currentTimeMillis();
+    }
+
+    /**
+     * The action to be performed by this timer task.
+     */
+    @Override
+    public void run()
+    {
+      long latestCount = exportedCount;
+      long deltaCount = (latestCount - previousCount);
+      long latestTime = System.currentTimeMillis();
+      long deltaTime = latestTime - previousTime;
+
+      // Guard against division by zero on a zero-length interval.
+      if (deltaTime == 0)
+      {
+        return;
+      }
+
+      float rate = 1000f*deltaCount / deltaTime;
+
+      Message message =
+          NOTE_NDB_EXPORT_PROGRESS_REPORT.get(latestCount, skippedCount, rate);
+      logError(message);
+
+      previousCount = latestCount;
+      previousTime = latestTime;
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/IndexFilter.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/IndexFilter.java
new file mode 100644
index 0000000..b6cbcdb
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/IndexFilter.java
@@ -0,0 +1,244 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+
+import com.mysql.cluster.ndbj.NdbApiException;
+import com.mysql.cluster.ndbj.NdbIndexScanOperation;
+import com.mysql.cluster.ndbj.NdbOperation;
+import com.mysql.cluster.ndbj.NdbOperation.AbortOption;
+import com.mysql.cluster.ndbj.NdbResultSet;
+import com.mysql.cluster.ndbj.NdbTransaction;
+import com.mysql.cluster.ndbj.NdbTransaction.ExecType;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import org.opends.server.core.SearchOperation;
+import org.opends.server.types.SearchFilter;
+
+/**
+ * An index filter is used to apply a search operation to a set of indexes
+ * to generate a set of candidate entries.
+ */
+public class IndexFilter
+{
+  /**
+   * The entry entryContainer holding the attribute indexes.
+   */
+  private EntryContainer entryContainer;
+
+  /**
+   * The search operation provides the search base, scope and filter.
+   * It can also be checked periodically for cancellation.
+   */
+  private SearchOperation searchOp;
+
+  private boolean defined;
+  private SearchFilter searchFilter;
+  private List<NdbResultSet> rsList;
+  private NdbTransaction ndbTxn;
+  private AbstractTransaction txn;
+  private Iterator<NdbResultSet> resultsIterator;
+  private NdbResultSet currentRs;
+  private Set<Long> returnedSet;
+
+  /**
+   * Construct an index filter for a search operation.
+   *
+   * @param txn            Abstract transaction.
+   * @param entryContainer The entry entryContainer.
+   * @param searchOp       The search operation to be evaluated.
+   * @throws NdbApiException An error occurred during a database operation.
+   */
+  public IndexFilter(AbstractTransaction txn,
+                     EntryContainer entryContainer,
+                     SearchOperation searchOp) throws NdbApiException
+  {
+    this.txn = txn;
+    this.entryContainer = entryContainer;
+    this.searchOp = searchOp;
+    this.searchFilter = this.searchOp.getFilter();
+    this.ndbTxn = txn.getNdbTransaction();
+    this.rsList = new ArrayList<NdbResultSet>();
+    this.resultsIterator = null;
+    this.returnedSet = new HashSet<Long>();
+    this.currentRs = null;
+    this.defined = false;
+  }
+
+  /**
+   * Perform index filter scan.
+   * @throws NdbApiException An error occurred during a database operation.
+   */
+  public void scan() throws NdbApiException {
+
+    ndbTxn.execute(ExecType.NoCommit, AbortOption.AbortOnError, true);
+    resultsIterator = rsList.iterator();
+    currentRs = resultsIterator.next();
+  }
+
+  /**
+   * Get next entry id from index scan results.
+   * @return next entry id or zero if none.
+   * @throws NdbApiException An error occurred during a database operation.
+   */
+  public long getNext() throws NdbApiException {
+
+    long eid = 0;
+
+    while (!currentRs.next()) {
+      if (resultsIterator.hasNext()) {
+        currentRs = resultsIterator.next();
+      } else {
+        return eid;
+      }
+    }
+
+    eid = currentRs.getLong(BackendImpl.EID);
+    if (!returnedSet.add(eid)) {
+      return getNext();
+    }
+
+    return eid;
+  }
+
+  /**
+   * Evaluate index filter.
+   * @return true if the filter is defined, false otherwise.
+   * @throws NdbApiException An error occurred during a database operation.
+   */
+  public boolean evaluate()
+    throws NdbApiException
+  {
+    defined = false;
+    return evaluateFilter(searchFilter);
+  }
+
+  private boolean evaluateFilter(SearchFilter filter)
+    throws NdbApiException
+  {
+    String attrName = null;
+
+    switch (filter.getFilterType()) {
+      case AND:
+      case OR:
+        for (SearchFilter compFilter :
+             filter.getFilterComponents())
+        {
+          evaluateFilter(compFilter);
+        }
+        break;
+
+      case EQUALITY:
+      case APPROXIMATE_MATCH:
+        attrName = filter.getAttributeType().getNameOrOID();
+        if (BackendImpl.indexes.contains(attrName)) {
+          NdbIndexScanOperation indexScanOp =
+            ndbTxn.getSelectIndexScanOperation(BackendImpl.IDX_VAL,
+            BackendImpl.IDX_TABLE_PREFIX + attrName,
+            NdbOperation.LockMode.LM_CommittedRead);
+          indexScanOp.setBoundString(BackendImpl.IDX_VAL,
+            NdbIndexScanOperation.BoundType.BoundEQ,
+            filter.getAssertionValue().toString());
+          indexScanOp.getValue(BackendImpl.EID);
+          NdbResultSet rs = indexScanOp.resultData();
+          rsList.add(rs);
+          defined = true;
+        }
+        break;
+
+      case GREATER_OR_EQUAL:
+        attrName = filter.getAttributeType().getNameOrOID();
+        if (BackendImpl.indexes.contains(attrName)) {
+          NdbIndexScanOperation indexScanOp =
+            ndbTxn.getSelectIndexScanOperation(BackendImpl.IDX_VAL,
+            BackendImpl.IDX_TABLE_PREFIX + attrName,
+            NdbOperation.LockMode.LM_CommittedRead);
+          indexScanOp.setBoundString(BackendImpl.IDX_VAL,
+            NdbIndexScanOperation.BoundType.BoundGE,
+            filter.getAssertionValue().toString());
+          indexScanOp.getValue(BackendImpl.EID);
+          NdbResultSet rs = indexScanOp.resultData();
+          rsList.add(rs);
+          defined = true;
+        }
+        break;
+
+      case LESS_OR_EQUAL:
+        attrName = filter.getAttributeType().getNameOrOID();
+        if (BackendImpl.indexes.contains(attrName)) {
+          NdbIndexScanOperation indexScanOp =
+            ndbTxn.getSelectIndexScanOperation(BackendImpl.IDX_VAL,
+            BackendImpl.IDX_TABLE_PREFIX + attrName,
+            NdbOperation.LockMode.LM_CommittedRead);
+          indexScanOp.setBoundString(BackendImpl.IDX_VAL,
+            NdbIndexScanOperation.BoundType.BoundLE,
+            filter.getAssertionValue().toString());
+          indexScanOp.getValue(BackendImpl.EID);
+          NdbResultSet rs = indexScanOp.resultData();
+          rsList.add(rs);
+          defined = true;
+        }
+        break;
+
+      case PRESENT:
+        attrName = filter.getAttributeType().getNameOrOID();
+        if (BackendImpl.indexes.contains(attrName)) {
+          NdbIndexScanOperation indexScanOp =
+            ndbTxn.getSelectIndexScanOperation(BackendImpl.IDX_VAL,
+            BackendImpl.IDX_TABLE_PREFIX + attrName,
+            NdbOperation.LockMode.LM_CommittedRead);
+          indexScanOp.setBoundString(BackendImpl.IDX_VAL,
+            NdbIndexScanOperation.BoundType.BoundLT, "");
+          indexScanOp.getValue(BackendImpl.EID);
+          NdbResultSet rs = indexScanOp.resultData();
+          rsList.add(rs);
+          defined = true;
+        }
+        break;
+
+      case NOT:
+      case SUBSTRING:
+      case EXTENSIBLE_MATCH:
+      default:
+        //NYI
+        break;
+    }
+
+    return defined;
+  }
+
+  /**
+   * Close index filter.
+   */
+  public void close() {
+    ndbTxn = null;
+    txn = null;
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/NDBException.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/NDBException.java
new file mode 100644
index 0000000..bf84b1c
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/NDBException.java
@@ -0,0 +1,76 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+
+
+
+import org.opends.server.types.IdentifiedException;
+import org.opends.messages.Message;
+
+
+/**
+ * This class defines an exception that may be thrown if a problem occurs in
+ * the NDB backend database.
+ */
+public class NDBException
+     extends IdentifiedException
+{
+  /**
+   * The serial version identifier required to satisfy the compiler because
+   * this class extends {@code java.lang.Exception}, which implements the
+   * {@code java.io.Serializable} interface.  This value was generated using
+   * the {@code serialver} command-line utility included with the Java SDK.
+   * Declared private per the Java serialization convention.
+   */
+  private static final long serialVersionUID = 3110979454298870834L;
+
+
+
+  /**
+   * Creates a new NDB backend exception with the provided message.
+   *
+   * @param  message    The message that explains the problem that occurred.
+   */
+  public NDBException(Message message)
+  {
+    super(message);
+  }
+
+
+
+  /**
+   * Creates a new NDB backend exception with the provided message and root
+   * cause.
+   *
+   * @param  message    The message that explains the problem that occurred.
+   * @param  cause      The exception that was caught to trigger this
+   *                    exception.
+   */
+  public NDBException(Message message, Throwable cause)
+  {
+    super(message, cause);
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/OperationContainer.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/OperationContainer.java
new file mode 100644
index 0000000..2b263be
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/OperationContainer.java
@@ -0,0 +1,1708 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+
+import com.mysql.cluster.ndbj.Ndb;
+import com.mysql.cluster.ndbj.NdbApiException;
+import com.mysql.cluster.ndbj.NdbBlob;
+import com.mysql.cluster.ndbj.NdbIndexScanOperation;
+import com.mysql.cluster.ndbj.NdbOperation;
+import com.mysql.cluster.ndbj.NdbOperation.AbortOption;
+import com.mysql.cluster.ndbj.NdbResultSet;
+import com.mysql.cluster.ndbj.NdbTransaction;
+import com.mysql.cluster.ndbj.NdbTransaction.ExecType;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.types.DN;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.Attribute;
+import org.opends.server.types.AttributeBuilder;
+import org.opends.server.types.AttributeType;
+import org.opends.server.types.AttributeValue;
+import org.opends.server.types.AttributeValues;
+import org.opends.server.types.ByteString;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.Entry;
+import org.opends.server.types.ObjectClass;
+import org.opends.server.types.ObjectClassType;
+
+import static org.opends.server.util.ServerConstants.ATTR_REFERRAL_URL;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+
+/**
+ * This class represents the DN database, which has one record for each entry.
+ */
+public class OperationContainer extends DatabaseContainer
+{
  /**
   * The tracer object for the debug logger.
   */
  private static final DebugTracer TRACER = getTracer();

  /**
   * The default name of the NDB ordered index on the primary key, used
   * below for per-entry scans of the objectclass and tags tables.
   */
  private static final String PRIMARY_INDEX_NAME = "PRIMARY";

  /**
   * The name of the extensibleObject objectclass, which permits an entry
   * to hold attributes outside its declared objectclasses.
   */
  private static final String OC_EXTENSIBLEOBJECT = "extensibleObject";

  /**
   * Lowest multivalued attribute id (MID column value); value rows are
   * numbered upward from this.
   */
  private static final int MIN_MID = 1;
+
  /**
   * Create a OperationContainer instance for the database in a
   * given entryContainer.  Delegates all state handling to
   * {@link DatabaseContainer}.
   *
   * @param name The name of the database (the dn2id table name).
   * @param entryContainer The entryContainer of the database.
   * @throws NdbApiException If an error occurs.
   */
  OperationContainer(String name, EntryContainer entryContainer)
      throws NdbApiException
  {
    super(name, entryContainer);
  }
+
+  /**
+   * Insert a new entry into the database.
+   * @param txn Abstract transaction to be used for the database operation.
+   * @param dn The entry DN.
+   * @param id The entry ID.
+   * @param entry The entry.
+   * @return true if the entry was inserted, false if a entry already exists.
+   * @throws NdbApiException If an error occurred while attempting to insert
+   * the new entry.
+   */
+  public boolean insert(AbstractTransaction txn, DN dn, long id, Entry entry)
+       throws NdbApiException
+  {
+    return writeNDBEntry(txn, dn, id, entry, false);
+  }
+
+  /**
+   * Put an entry to the database.  If an entry already exists, the entry
+   * will be replaced, otherwise a new entry will be inserted.
+   * @param txn Abstract transaction to be used for the database operation.
+   * @param dn The entry DN.
+   * @param id The entry ID.
+   * @param entry The new entry.
+   * @param originalEntry The old entry.
+   * @return true if the entry was written, false if it was not written.
+   * @throws NdbApiException If an error occurred while attempting to write
+   * the entry.
+   */
+  public boolean put(AbstractTransaction txn, DN dn, long id, Entry entry,
+                     Entry originalEntry)
+       throws NdbApiException
+  {
+    // Delete first.
+    deleteNDBEntry(txn, originalEntry, id);
+
+    return writeNDBEntry(txn, dn, id, entry, true);
+  }
+
+  /**
+   * Write an entry to the database.
+   * @param txn Abstract transaction to be used for the database operation.
+   * @param dn The entry DN.
+   * @param id The entry ID.
+   * @param entry The entry.
+   * @param overwrite Whether or not the entry should be overwritten.
+   * @return true if the entry was written, false if it was not written.
+   * @throws com.mysql.cluster.ndbj.NdbApiException If an error occurred
+   * while attempting to write the entry.
+   */
+  private boolean writeNDBEntry(AbstractTransaction txn, DN dn,
+    long id, Entry entry, boolean overwrite)
+    throws NdbApiException
+  {
+    int nClasses = 0;
+    NdbOperation op = null;
+    NdbOperation tagOp = null;
+    StringBuilder ocBuffer = new StringBuilder();
+    StringBuilder xocBuffer = new StringBuilder();
+    Map<ObjectClass, String> ocMap = entry.getObjectClasses();
+    Map<AttributeType, List<Attribute>> userAttrMap =
+      entry.getUserAttributes();
+    ArrayList<AttributeType> userAttributes =
+      new ArrayList<AttributeType>();
+
+    boolean extensibleObject = false;
+
+    // Update ocs tables.
+    NdbTransaction ndbDATxn = null;
+    for (Map.Entry<ObjectClass, String> ocEntry : ocMap.entrySet()) {
+      ObjectClass oc = ocEntry.getKey();
+      String ocName = oc.getNameOrOID();
+
+      Map<Integer, NdbOperation> mvOpMap =
+        new HashMap<Integer, NdbOperation>();
+
+      if (nClasses > 0) {
+        ocBuffer.append(" ");
+      }
+      ocBuffer.append(ocName);
+      nClasses++;
+
+      if (oc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+        continue;
+      }
+
+      if (ocName.equalsIgnoreCase(OC_EXTENSIBLEOBJECT)) {
+        extensibleObject = true;
+      }
+
+      if (ndbDATxn == null) {
+        ndbDATxn = txn.getNdbDATransaction(ocName, id);
+      }
+      if (overwrite) {
+        op = ndbDATxn.getWriteOperation(ocName);
+      } else {
+        op = ndbDATxn.getInsertOperation(ocName);
+      }
+      op.equalLong(BackendImpl.EID, id);
+      op.equalInt(BackendImpl.MID, MIN_MID);
+      mvOpMap.put(MIN_MID, op);
+
+      for (AttributeType reqAttr : oc.getRequiredAttributes()) {
+        if (userAttributes.contains(reqAttr)) {
+          continue;
+        }
+        if (reqAttr.isOperational()) {
+          userAttrMap.put(reqAttr, entry.getOperationalAttribute(reqAttr));
+        }
+        String attrName = reqAttr.getNameOrOID();
+        if (entry.hasAttribute(reqAttr)) {
+          boolean indexed = BackendImpl.indexes.contains(attrName);
+          List<Attribute> attrList = userAttrMap.get(reqAttr);
+          int mid = MIN_MID;
+          for (Attribute attr : attrList) {
+            if (attr.isVirtual() || attr.isEmpty()) {
+              continue;
+            }
+            // Attribute options.
+            Set<String> attrOptionsSet = attr.getOptions();
+            if (!attrOptionsSet.isEmpty()) {
+              if (overwrite) {
+                tagOp =
+                  ndbDATxn.getWriteOperation(BackendImpl.TAGS_TABLE);
+              } else {
+                tagOp =
+                  ndbDATxn.getInsertOperation(BackendImpl.TAGS_TABLE);
+              }
+              tagOp.equalLong(BackendImpl.EID, id);
+              tagOp.equalString(BackendImpl.TAG_ATTR, attrName);
+              tagOp.equalInt(BackendImpl.MID, mid);
+              StringBuilder buffer = new StringBuilder();
+              for (String option : attrOptionsSet) {
+                buffer.append(';');
+                buffer.append(option);
+              }
+              tagOp.setString(BackendImpl.TAG_TAGS, buffer.toString());
+            }
+            for (AttributeValue attrVal : attr) {
+              String attrStringVal = attrVal.toString();
+              NdbOperation attrOp = mvOpMap.get(mid);
+              if (attrOp == null) {
+                if (overwrite) {
+                  attrOp = ndbDATxn.getWriteOperation(ocName);
+                } else {
+                  attrOp = ndbDATxn.getInsertOperation(ocName);
+                }
+                attrOp.equalLong(BackendImpl.EID, id);
+                attrOp.equalInt(BackendImpl.MID, mid);
+                mvOpMap.put(mid, attrOp);
+              }
+              if (BackendImpl.blobAttributes.contains(attrName)) {
+                NdbBlob blob = attrOp.getBlobHandle(attrName);
+                blob.setValue(attrVal.getValue().toByteArray());
+              } else {
+                attrOp.setString(attrName, attrStringVal);
+              }
+              // Update Indexes.
+              if (indexed) {
+                NdbOperation idxOp = null;
+                if (overwrite) {
+                  idxOp = ndbDATxn.getWriteOperation(
+                    BackendImpl.IDX_TABLE_PREFIX + attrName);
+                } else {
+                  idxOp = ndbDATxn.getInsertOperation(
+                    BackendImpl.IDX_TABLE_PREFIX + attrName);
+                }
+                idxOp.equalLong(BackendImpl.EID, id);
+                idxOp.equalInt(BackendImpl.MID, mid);
+                idxOp.setString(BackendImpl.IDX_VAL, attrStringVal);
+              }
+              mid++;
+            }
+          }
+          userAttributes.add(reqAttr);
+        }
+      }
+
+      for (AttributeType optAttr : oc.getOptionalAttributes()) {
+        if (userAttributes.contains(optAttr)) {
+          continue;
+        }
+        if (optAttr.isOperational()) {
+          userAttrMap.put(optAttr, entry.getOperationalAttribute(optAttr));
+        }
+        String attrName = optAttr.getNameOrOID();
+        if (entry.hasAttribute(optAttr)) {
+          boolean indexed = BackendImpl.indexes.contains(attrName);
+          List<Attribute> attrList = userAttrMap.get(optAttr);
+          int mid = MIN_MID;
+          for (Attribute attr : attrList) {
+            if (attr.isVirtual() || attr.isEmpty()) {
+              continue;
+            }
+            // Attribute options.
+            Set<String> attrOptionsSet = attr.getOptions();
+            if (!attrOptionsSet.isEmpty()) {
+              if (overwrite) {
+                tagOp =
+                  ndbDATxn.getWriteOperation(BackendImpl.TAGS_TABLE);
+              } else {
+                tagOp =
+                  ndbDATxn.getInsertOperation(BackendImpl.TAGS_TABLE);
+              }
+              tagOp.equalLong(BackendImpl.EID, id);
+              tagOp.equalString(BackendImpl.TAG_ATTR, attrName);
+              tagOp.equalInt(BackendImpl.MID, mid);
+              StringBuilder buffer = new StringBuilder();
+              for (String option : attrOptionsSet) {
+                buffer.append(';');
+                buffer.append(option);
+              }
+              tagOp.setString(BackendImpl.TAG_TAGS, buffer.toString());
+            }
+            for (AttributeValue attrVal : attr) {
+              String attrStringVal = attrVal.toString();
+              NdbOperation attrOp = mvOpMap.get(mid);
+              if (attrOp == null) {
+                if (overwrite) {
+                  attrOp = ndbDATxn.getWriteOperation(ocName);
+                } else {
+                  attrOp = ndbDATxn.getInsertOperation(ocName);
+                }
+                attrOp.equalLong(BackendImpl.EID, id);
+                attrOp.equalInt(BackendImpl.MID, mid);
+                mvOpMap.put(mid, attrOp);
+              }
+              if (BackendImpl.blobAttributes.contains(attrName)) {
+                NdbBlob blob = attrOp.getBlobHandle(attrName);
+                blob.setValue(attrVal.getValue().toByteArray());
+              } else {
+                attrOp.setString(attrName, attrStringVal);
+              }
+              // Update Indexes.
+              if (indexed) {
+                NdbOperation idxOp = null;
+                if (overwrite) {
+                  idxOp = ndbDATxn.getWriteOperation(
+                    BackendImpl.IDX_TABLE_PREFIX + attrName);
+                } else {
+                  idxOp = ndbDATxn.getInsertOperation(
+                    BackendImpl.IDX_TABLE_PREFIX + attrName);
+                }
+                idxOp.equalLong(BackendImpl.EID, id);
+                idxOp.equalInt(BackendImpl.MID, mid);
+                idxOp.setString(BackendImpl.IDX_VAL, attrStringVal);
+              }
+              mid++;
+            }
+          }
+          userAttributes.add(optAttr);
+        }
+      }
+    }
+
+    // Extensible object.
+    if (extensibleObject) {
+      int xnClasses = 0;
+      for (Map.Entry<AttributeType, List<Attribute>> attrEntry :
+           userAttrMap.entrySet())
+      {
+        AttributeType attrType = attrEntry.getKey();
+        if (!userAttributes.contains(attrType)) {
+          String attrName = attrType.getNameOrOID();
+          String ocName = BackendImpl.attr2Oc.get(attrName);
+          Map<Integer, NdbOperation> mvOpMap =
+            new HashMap<Integer, NdbOperation>();
+          boolean indexed = BackendImpl.indexes.contains(attrName);
+
+          if (ndbDATxn == null) {
+            ndbDATxn = txn.getNdbDATransaction(ocName, id);
+          }
+          if (overwrite) {
+            op = ndbDATxn.getWriteOperation(ocName);
+          } else {
+            op = ndbDATxn.getInsertOperation(ocName);
+          }
+          op.equalLong(BackendImpl.EID, id);
+          op.equalInt(BackendImpl.MID, MIN_MID);
+          mvOpMap.put(MIN_MID, op);
+
+          List<Attribute> attrList = userAttrMap.get(attrType);
+          int mid = MIN_MID;
+          for (Attribute attr : attrList) {
+            if (attr.isVirtual() || attr.isEmpty()) {
+              continue;
+            }
+            // Attribute options.
+            Set<String> attrOptionsSet = attr.getOptions();
+            if (!attrOptionsSet.isEmpty()) {
+              if (overwrite) {
+                tagOp =
+                  ndbDATxn.getWriteOperation(BackendImpl.TAGS_TABLE);
+              } else {
+                tagOp =
+                  ndbDATxn.getInsertOperation(BackendImpl.TAGS_TABLE);
+              }
+              tagOp.equalLong(BackendImpl.EID, id);
+              tagOp.equalString(BackendImpl.TAG_ATTR, attrName);
+              tagOp.equalInt(BackendImpl.MID, mid);
+              StringBuilder buffer = new StringBuilder();
+              for (String option : attrOptionsSet) {
+                buffer.append(';');
+                buffer.append(option);
+              }
+              tagOp.setString(BackendImpl.TAG_TAGS, buffer.toString());
+            }
+            for (AttributeValue attrVal : attr) {
+              String attrStringVal = attrVal.toString();
+              NdbOperation attrOp = mvOpMap.get(mid);
+              if (attrOp == null) {
+                if (overwrite) {
+                  attrOp = ndbDATxn.getWriteOperation(ocName);
+                } else {
+                  attrOp = ndbDATxn.getInsertOperation(ocName);
+                }
+                attrOp.equalLong(BackendImpl.EID, id);
+                attrOp.equalInt(BackendImpl.MID, mid);
+                mvOpMap.put(mid, attrOp);
+              }
+              if (BackendImpl.blobAttributes.contains(attrName)) {
+                NdbBlob blob = attrOp.getBlobHandle(attrName);
+                blob.setValue(attrVal.getValue().toByteArray());
+              } else {
+                attrOp.setString(attrName, attrStringVal);
+              }
+              // Update Indexes.
+              if (indexed) {
+                NdbOperation idxOp = null;
+                if (overwrite) {
+                  idxOp = ndbDATxn.getWriteOperation(
+                    BackendImpl.IDX_TABLE_PREFIX + attrName);
+                } else {
+                  idxOp = ndbDATxn.getInsertOperation(
+                    BackendImpl.IDX_TABLE_PREFIX + attrName);
+                }
+                idxOp.equalLong(BackendImpl.EID, id);
+                idxOp.equalInt(BackendImpl.MID, mid);
+                idxOp.setString(BackendImpl.IDX_VAL, attrStringVal);
+              }
+              mid++;
+            }
+          }
+          userAttributes.add(attrType);
+
+          if (xnClasses > 0) {
+            xocBuffer.append(" ");
+          }
+          xocBuffer.append(ocName);
+          xnClasses++;
+        }
+      }
+    }
+
+    // Update operational attributes table.
+    if (overwrite) {
+      op = ndbDATxn.getWriteOperation(BackendImpl.OPATTRS_TABLE);
+    } else {
+      op = ndbDATxn.getInsertOperation(BackendImpl.OPATTRS_TABLE);
+    }
+    op.equalLong(BackendImpl.EID, id);
+    for (List<Attribute> attrList :
+         entry.getOperationalAttributes().values())
+    {
+      for (Attribute attr : attrList) {
+        if (attr.isVirtual() || attr.isEmpty()) {
+          continue;
+        }
+        if (userAttrMap.containsKey(attr.getAttributeType())) {
+          continue;
+        }
+        String attrName = attr.getAttributeType().getNameOrOID();
+        for (AttributeValue attrVal : attr) {
+          op.setString(attrName, attrVal.toString());
+        }
+      }
+    }
+
+    // Update dn2id table.
+    NdbTransaction ndbTxn = txn.getNdbTransaction();
+    if (overwrite) {
+      op = ndbTxn.getWriteOperation(name);
+    } else {
+      op = ndbTxn.getInsertOperation(name);
+    }
+
+    int componentIndex = dn.getNumComponents() - 1;
+    for (int i=0; i < BackendImpl.DN2ID_DN_NC; i++) {
+      while (componentIndex >= 0) {
+        op.equalString(BackendImpl.DN2ID_DN + Integer.toString(i),
+          dn.getRDN(componentIndex).toNormalizedString());
+        componentIndex--;
+        i++;
+      }
+      op.equalString(BackendImpl.DN2ID_DN +
+        Integer.toString(i), "");
+    }
+
+    op.setLong(BackendImpl.EID, id);
+
+    op.setString(BackendImpl.DN2ID_OC, ocBuffer.toString());
+
+    op.setString(BackendImpl.DN2ID_XOC, xocBuffer.toString());
+
+    return true;
+  }
+
+  /**
+   * Delete an entry from the database.
+   * @param txn Abstract transaction to be used for the database operation.
+   * @param originalEntry The original entry.
+   * @param id The entry ID.
+   * @throws com.mysql.cluster.ndbj.NdbApiException If an error occurred
+   * while attempting to write the entry.
+   */
+  private void deleteNDBEntry(AbstractTransaction txn,
+    Entry originalEntry, long id) throws NdbApiException
+  {
+    NdbOperation op = null;
+    NdbOperation tagOp = null;
+    NdbTransaction ndbDATxn = null;
+    boolean extensibleObject = false;
+
+    // Delete attributes.
+    Map<ObjectClass, String> originalOcMap =
+      originalEntry.getObjectClasses();
+    ArrayList<AttributeType> originalUserAttributes =
+      new ArrayList<AttributeType>();
+    Map<AttributeType, List<Attribute>> originalUserAttrMap =
+      originalEntry.getUserAttributes();
+
+    for (Map.Entry<ObjectClass, String> ocEntry : originalOcMap.entrySet()) {
+      ObjectClass oc = ocEntry.getKey();
+      String ocName = oc.getNameOrOID();
+      Map<Integer, NdbOperation> mvOpMap =
+        new HashMap<Integer, NdbOperation>();
+
+      if (oc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+        continue;
+      }
+
+      if (ocName.equalsIgnoreCase(OC_EXTENSIBLEOBJECT)) {
+        extensibleObject = true;
+      }
+
+      if (ndbDATxn == null) {
+        ndbDATxn = txn.getNdbDATransaction(ocName, id);
+      }
+      op = ndbDATxn.getDeleteOperation(ocName);
+      op.equalLong(BackendImpl.EID, id);
+      op.equalInt(BackendImpl.MID, MIN_MID);
+      mvOpMap.put(MIN_MID, op);
+
+      for (AttributeType reqAttr : oc.getRequiredAttributes()) {
+        String attrName = reqAttr.getNameOrOID();
+        if (originalUserAttributes.contains(reqAttr)) {
+          continue;
+        }
+        if (originalEntry.hasUserAttribute(reqAttr)) {
+          boolean indexed = BackendImpl.indexes.contains(attrName);
+          List<Attribute> attrList = originalUserAttrMap.get(reqAttr);
+          int mid = MIN_MID;
+          for (Attribute attr : attrList) {
+            if (attr.isVirtual() || attr.isEmpty()) {
+              continue;
+            }
+            // Attribute options.
+            Set<String> attrOptionsSet = attr.getOptions();
+            if (!attrOptionsSet.isEmpty()) {
+              tagOp =
+                ndbDATxn.getDeleteOperation(BackendImpl.TAGS_TABLE);
+              tagOp.equalLong(BackendImpl.EID, id);
+              tagOp.equalString(BackendImpl.TAG_ATTR, attrName);
+              tagOp.equalInt(BackendImpl.MID, mid);
+            }
+            for (AttributeValue attrVal : attr) {
+              NdbOperation attrOp = mvOpMap.get(mid);
+              if (attrOp == null) {
+                attrOp = ndbDATxn.getDeleteOperation(ocName);
+                attrOp.equalLong(BackendImpl.EID, id);
+                attrOp.equalInt(BackendImpl.MID, mid);
+                mvOpMap.put(mid, attrOp);
+              }
+              // Update Indexes.
+              if (indexed) {
+                NdbOperation idxOp = ndbDATxn.getDeleteOperation(
+                  BackendImpl.IDX_TABLE_PREFIX + attrName);
+                idxOp.equalLong(BackendImpl.EID, id);
+                idxOp.equalInt(BackendImpl.MID, mid);
+              }
+              mid++;
+            }
+          }
+          originalUserAttributes.add(reqAttr);
+        }
+      }
+
+      for (AttributeType optAttr : oc.getOptionalAttributes()) {
+        String attrName = optAttr.getNameOrOID();
+        if (originalUserAttributes.contains(optAttr)) {
+          continue;
+        }
+        if (originalEntry.hasUserAttribute(optAttr)) {
+          boolean indexed = BackendImpl.indexes.contains(attrName);
+          List<Attribute> attrList = originalUserAttrMap.get(optAttr);
+          int mid = MIN_MID;
+          for (Attribute attr : attrList) {
+            if (attr.isVirtual() || attr.isEmpty()) {
+              continue;
+            }
+            // Attribute options.
+            Set<String> attrOptionsSet = attr.getOptions();
+            if (!attrOptionsSet.isEmpty()) {
+              tagOp =
+                ndbDATxn.getDeleteOperation(BackendImpl.TAGS_TABLE);
+              tagOp.equalLong(BackendImpl.EID, id);
+              tagOp.equalString(BackendImpl.TAG_ATTR, attrName);
+              tagOp.equalInt(BackendImpl.MID, mid);
+            }
+            for (AttributeValue attrVal : attr) {
+              NdbOperation attrOp = mvOpMap.get(mid);
+              if (attrOp == null) {
+                attrOp = ndbDATxn.getDeleteOperation(ocName);
+                attrOp.equalLong(BackendImpl.EID, id);
+                attrOp.equalInt(BackendImpl.MID, mid);
+                mvOpMap.put(mid, attrOp);
+              }
+              // Update Indexes.
+              if (indexed) {
+                NdbOperation idxOp = ndbDATxn.getDeleteOperation(
+                  BackendImpl.IDX_TABLE_PREFIX + attrName);
+                idxOp.equalLong(BackendImpl.EID, id);
+                idxOp.equalInt(BackendImpl.MID, mid);
+              }
+              mid++;
+            }
+          }
+          originalUserAttributes.add(optAttr);
+        }
+      }
+    }
+
+    // Extensible object.
+    if (extensibleObject) {
+      for (Map.Entry<AttributeType, List<Attribute>> attrEntry :
+           originalUserAttrMap.entrySet())
+      {
+        AttributeType attrType = attrEntry.getKey();
+        if (!originalUserAttributes.contains(attrType)) {
+          String attrName = attrType.getNameOrOID();
+          String ocName = BackendImpl.attr2Oc.get(attrName);
+          Map<Integer, NdbOperation> mvOpMap =
+            new HashMap<Integer, NdbOperation>();
+          boolean indexed = BackendImpl.indexes.contains(attrName);
+
+          if (ndbDATxn == null) {
+            ndbDATxn = txn.getNdbDATransaction(ocName, id);
+          }
+          op = ndbDATxn.getDeleteOperation(ocName);
+          op.equalLong(BackendImpl.EID, id);
+          op.equalInt(BackendImpl.MID, MIN_MID);
+          mvOpMap.put(MIN_MID, op);
+
+          List<Attribute> attrList = originalUserAttrMap.get(attrType);
+          int mid = MIN_MID;
+          for (Attribute attr : attrList) {
+            if (attr.isVirtual() || attr.isEmpty()) {
+              continue;
+            }
+            // Attribute options.
+            Set<String> attrOptionsSet = attr.getOptions();
+            if (!attrOptionsSet.isEmpty()) {
+              tagOp =
+                ndbDATxn.getDeleteOperation(BackendImpl.TAGS_TABLE);
+              tagOp.equalLong(BackendImpl.EID, id);
+              tagOp.equalString(BackendImpl.TAG_ATTR, attrName);
+              tagOp.equalInt(BackendImpl.MID, mid);
+            }
+            for (AttributeValue attrVal : attr) {
+              NdbOperation attrOp = mvOpMap.get(mid);
+              if (attrOp == null) {
+                attrOp = ndbDATxn.getDeleteOperation(ocName);
+                attrOp.equalLong(BackendImpl.EID, id);
+                attrOp.equalInt(BackendImpl.MID, mid);
+                mvOpMap.put(mid, attrOp);
+              }
+              // Update Indexes.
+              if (indexed) {
+                NdbOperation idxOp = ndbDATxn.getDeleteOperation(
+                  BackendImpl.IDX_TABLE_PREFIX + attrName);
+                idxOp.equalLong(BackendImpl.EID, id);
+                idxOp.equalInt(BackendImpl.MID, mid);
+              }
+              mid++;
+            }
+          }
+          originalUserAttributes.add(attrType);
+        }
+      }
+    }
+  }
+
+  /**
+   * Remove an entry from the database.
+   * @param txn Abstract transaction to be used for the database operation.
+   * @param entry The entry.
+   * @return true if the entry was removed, false if it was not removed.
+   * @throws NdbApiException If an error occurred while attempting to remove
+   * the entry.
+   */
+  public boolean remove(AbstractTransaction txn, Entry entry)
+       throws NdbApiException
+  {
+    DN dn = entry.getDN();
+
+    NdbResultSet rs = null;
+
+    NdbTransaction ndbTxn = txn.getNdbTransaction();
+
+    NdbOperation op = ndbTxn.getSelectOperation(name,
+      NdbOperation.LockMode.LM_CommittedRead);
+
+    boolean extensibleObject = false;
+
+    int componentIndex = dn.getNumComponents() - 1;
+    for (int i=0; i < BackendImpl.DN2ID_DN_NC; i++) {
+      while (componentIndex >= 0) {
+        op.equalString(BackendImpl.DN2ID_DN + Integer.toString(i),
+          dn.getRDN(componentIndex).toNormalizedString());
+        componentIndex--;
+        i++;
+      }
+      op.equalString(BackendImpl.DN2ID_DN +
+        Integer.toString(i), "");
+    }
+    op.getValue(BackendImpl.EID);
+    op.getValue(BackendImpl.DN2ID_OC);
+    op.getValue(BackendImpl.DN2ID_XOC);
+
+    rs = op.resultData();
+    ndbTxn.execute(ExecType.NoCommit, AbortOption.AO_IgnoreError, true);
+
+    long eid = 0;
+    NdbTransaction ndbDATxn = null;
+    String[] ocsStringArray = null;
+    String[] xocsStringArray = null;
+    List<NdbResultSet> ocRsList = new ArrayList<NdbResultSet>();
+    NdbIndexScanOperation indexScanOp = null;
+
+    if (rs.next()) {
+      eid = rs.getLong(BackendImpl.EID);
+      String ocsString = rs.getString(BackendImpl.DN2ID_OC);
+      ocsStringArray = ocsString.split(" ");
+
+      String xocsString = rs.getString(BackendImpl.DN2ID_XOC);
+      xocsStringArray = xocsString.split(" ");
+      if (xocsString.length() > 0) {
+        extensibleObject = true;
+      }
+
+      for (String ocName : ocsStringArray) {
+        ObjectClass oc =
+          DirectoryServer.getObjectClass(ocName, true);
+        if (oc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+          continue;
+        }
+        if (ndbDATxn == null) {
+          ndbDATxn = txn.getNdbDATransaction(ocName, eid);
+        }
+        indexScanOp =
+          ndbDATxn.getSelectIndexScanOperation(PRIMARY_INDEX_NAME, ocName);
+        indexScanOp.setBoundLong(BackendImpl.EID,
+            NdbIndexScanOperation.BoundType.BoundEQ, eid);
+        indexScanOp.getValue(BackendImpl.MID);
+        ocRsList.add(indexScanOp.resultData());
+      }
+
+      // Extensible object.
+      if (extensibleObject) {
+        for (String xocName : xocsStringArray) {
+          ObjectClass xoc =
+            DirectoryServer.getObjectClass(xocName, true);
+          if (xoc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+            continue;
+          }
+          if (ndbDATxn == null) {
+            ndbDATxn = txn.getNdbDATransaction(xocName, eid);
+          }
+          indexScanOp =
+            ndbDATxn.getSelectIndexScanOperation(PRIMARY_INDEX_NAME, xocName);
+          indexScanOp.setBoundLong(BackendImpl.EID,
+            NdbIndexScanOperation.BoundType.BoundEQ, eid);
+          indexScanOp.getValue(BackendImpl.MID);
+          ocRsList.add(indexScanOp.resultData());
+        }
+      }
+    }
+
+    // Attribute options.
+    if (ndbDATxn == null) {
+      ndbDATxn = txn.getNdbDATransaction(BackendImpl.TAGS_TABLE, eid);
+    }
+    indexScanOp = ndbDATxn.getSelectIndexScanOperation(PRIMARY_INDEX_NAME,
+      BackendImpl.TAGS_TABLE);
+    indexScanOp.setBoundLong(BackendImpl.EID,
+      NdbIndexScanOperation.BoundType.BoundEQ, eid);
+    indexScanOp.getValue(BackendImpl.TAG_ATTR);
+    indexScanOp.getValue(BackendImpl.MID);
+    NdbResultSet tagRs = indexScanOp.resultData();
+
+    ndbDATxn.execute(ExecType.NoCommit, AbortOption.AO_IgnoreError, true);
+
+    Iterator<NdbResultSet> rsIterator = ocRsList.iterator();
+    for (String ocName : ocsStringArray) {
+      ObjectClass oc =
+        DirectoryServer.getObjectClass(ocName, true);
+      if (oc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+        continue;
+      }
+      NdbResultSet ocRs = rsIterator.next();
+      while (ocRs.next()) {
+        int mid = ocRs.getInt(BackendImpl.MID);
+        op = ndbDATxn.getDeleteOperation(ocName);
+        op.equalLong(BackendImpl.EID, eid);
+        op.equalInt(BackendImpl.MID, mid);
+      }
+    }
+
+    // Extensible object.
+    if (extensibleObject) {
+      for (String xocName : xocsStringArray) {
+        ObjectClass xoc =
+          DirectoryServer.getObjectClass(xocName, true);
+        if (xoc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+          continue;
+        }
+        NdbResultSet ocRs = rsIterator.next();
+        while (ocRs.next()) {
+          int mid = ocRs.getInt(BackendImpl.MID);
+          op = ndbDATxn.getDeleteOperation(xocName);
+          op.equalLong(BackendImpl.EID, eid);
+          op.equalInt(BackendImpl.MID, mid);
+        }
+      }
+    }
+
+    // Operational attributes.
+    op = ndbDATxn.getDeleteOperation(BackendImpl.OPATTRS_TABLE);
+    op.equalLong(BackendImpl.EID, eid);
+
+    // Attribute options.
+    while (tagRs.next()) {
+      String attrName = tagRs.getString(BackendImpl.TAG_ATTR);
+      int mid = tagRs.getInt(BackendImpl.MID);
+      op = ndbDATxn.getDeleteOperation(BackendImpl.TAGS_TABLE);
+      op.equalLong(BackendImpl.EID, eid);
+      op.equalString(BackendImpl.TAG_ATTR, attrName);
+      op.equalInt(BackendImpl.MID, mid);
+    }
+
+    // Indexes.
+    for (String attrName : BackendImpl.indexes) {
+      AttributeType attributeType =
+              DirectoryServer.getAttributeType(
+              attrName.toLowerCase(), true);
+      if (entry.hasAttribute(attributeType)) {
+        List<Attribute> attrList =
+          entry.getAttribute(attributeType);
+        int mid = MIN_MID;
+        for (Attribute attr : attrList) {
+          for (AttributeValue attrVal : attr) {
+            NdbOperation idxOp = ndbDATxn.getDeleteOperation(
+              BackendImpl.IDX_TABLE_PREFIX + attrName);
+            idxOp.equalLong(BackendImpl.EID, eid);
+            idxOp.equalInt(BackendImpl.MID, mid);
+            mid++;
+          }
+        }
+      }
+    }
+
+    // dn2id.
+    op = ndbTxn.getDeleteOperation(name);
+    componentIndex = dn.getNumComponents() - 1;
+    for (int i=0; i < BackendImpl.DN2ID_DN_NC; i++) {
+      while (componentIndex >= 0) {
+        op.equalString(BackendImpl.DN2ID_DN + Integer.toString(i),
+          dn.getRDN(componentIndex).toNormalizedString());
+        componentIndex--;
+        i++;
+      }
+      op.equalString(BackendImpl.DN2ID_DN +
+        Integer.toString(i), "");
+    }
+
+    return true;
+  }
+
+  /**
+   * Fetch the entry for a given ID.
+   * @param txn Abstract transaction to be used for the database read.
+   * @param eid The ID for which the entry is desired.
+   * @param lockMode NDB locking mode for this operation.
+   * @return The entry, or null if the given ID is not in the database.
+   * @throws NdbApiException If an error occurs in the database.
+   * @throws DirectoryException If a problem occurs while trying to
+   *         retrieve the entry.
+   */
+  public Entry get(AbstractTransaction txn, long eid,
+    NdbOperation.LockMode lockMode)
+       throws NdbApiException, DirectoryException
+  {
+    NdbTransaction ndbTxn = txn.getNdbTransaction();
+
+    // Scan the eid index for the dn2id row holding this entry ID.
+    NdbIndexScanOperation scanOp = ndbTxn.getSelectIndexScanOperation(
+      BackendImpl.EID, name, lockMode);
+    scanOp.setBoundLong(BackendImpl.EID,
+      NdbIndexScanOperation.BoundType.BoundEQ, eid);
+
+    // Request every DN component column plus the objectclass columns.
+    for (int col = 0; col < BackendImpl.DN2ID_DN_NC; col++) {
+      scanOp.getValue(BackendImpl.DN2ID_DN + Integer.toString(col));
+    }
+    scanOp.getValue(BackendImpl.DN2ID_OC);
+    scanOp.getValue(BackendImpl.DN2ID_XOC);
+
+    NdbResultSet rs = scanOp.resultData();
+    ndbTxn.execute(ExecType.NoCommit, AbortOption.AO_IgnoreError, true);
+
+    if (!rs.next()) {
+      return null;
+    }
+
+    // Reassemble the DN string from its per-column RDN pieces,
+    // highest column index (most specific RDN) first; empty columns
+    // are skipped.
+    StringBuilder dnBuilder = new StringBuilder();
+    for (int col = BackendImpl.DN2ID_DN_NC - 1; col >= 0; col--) {
+      String rdn = rs.getString(BackendImpl.DN2ID_DN +
+        Integer.toString(col));
+      if (rdn.length() > 0) {
+        dnBuilder.append(rdn);
+        if (col > 0) {
+          dnBuilder.append(",");
+        }
+      }
+    }
+    if (dnBuilder.length() == 0) {
+      return null;
+    }
+
+    DN dn = DN.decode(dnBuilder.toString());
+    return getNDBEntry(txn, rs, dn, eid);
+  }
+
+
+  /**
+   * Fetch the entry for a given DN.
+   * @param txn Abstract transaction to be used for the database read.
+   * @param dn The DN for which the entry is desired.
+   * @param lockMode NDB locking mode for this operation.
+   * @return The entry, or null if the given DN is not in the database.
+   * @throws NdbApiException If an error occurs in the database.
+   */
+  public Entry get(AbstractTransaction txn, DN dn,
+    NdbOperation.LockMode lockMode) throws NdbApiException
+  {
+    NdbOperation op = null;
+    NdbResultSet rs = null;
+
+    NdbTransaction ndbTxn = txn.getNdbTransaction();
+
+    // Primary-key read on the dn2id table, which stores one RDN per
+    // DN component column.
+    op = ndbTxn.getSelectOperation(name, lockMode);
+
+    // Bind the DN components suffix first (column 0 holds the last
+    // RDN), then pad every remaining DN column with the empty string.
+    // The inner while advances i past the last component, so the
+    // trailing equalString continues from the first unused column.
+    // NOTE(review): when dn.getNumComponents() == DN2ID_DN_NC the
+    // trailing equalString targets column index DN2ID_DN_NC, one past
+    // the last DN column -- confirm DNs never reach the column limit.
+    int componentIndex = dn.getNumComponents() - 1;
+    for (int i=0; i < BackendImpl.DN2ID_DN_NC; i++) {
+      while (componentIndex >= 0) {
+        op.equalString(BackendImpl.DN2ID_DN + Integer.toString(i),
+          dn.getRDN(componentIndex).toNormalizedString());
+        componentIndex--;
+        i++;
+      }
+      op.equalString(BackendImpl.DN2ID_DN +
+        Integer.toString(i), "");
+    }
+    op.getValue(BackendImpl.EID);
+    op.getValue(BackendImpl.DN2ID_OC);
+    op.getValue(BackendImpl.DN2ID_XOC);
+
+    rs = op.resultData();
+    ndbTxn.execute(ExecType.NoCommit, AbortOption.AO_IgnoreError, true);
+
+    if (rs.next()) {
+      long eid = rs.getLong(BackendImpl.EID);
+      // An entry ID of zero is treated as entry not found.
+      if (eid == 0) {
+        return null;
+      }
+      Entry entry = getNDBEntry(txn, rs, dn, eid);
+      if (entry != null) {
+        // Attach the entry ID so later operations can reuse it
+        // without another dn2id lookup.
+        entry.setAttachment(eid);
+      }
+      return entry;
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Get the entry from the database.
+   * Issues one index scan per non-abstract objectclass (and extensible
+   * objectclass), one select for operational attributes and one scan
+   * for attribute options, then hands the collected result sets to
+   * {@code decodeNDBEntry}.
+   * @param txn Abstract transaction to be used for the database read.
+   * @param rs NDB results set from the initial get entry operation;
+   *        must be positioned on the entry's dn2id row.
+   * @param dn The entry DN.
+   * @param eid The entry ID.
+   * @return The entry.
+   * @throws NdbApiException If an error occurs in the database.
+   */
+  private Entry getNDBEntry(
+    AbstractTransaction txn,
+    NdbResultSet rs,
+    DN dn,
+    long eid) throws NdbApiException
+  {
+    NdbOperation op = null;
+    NdbIndexScanOperation indexScanOp = null;
+    boolean extensibleObject = false;
+
+    // Objectclass names are stored as space-separated strings in the
+    // dn2id row.
+    String ocsString = rs.getString(BackendImpl.DN2ID_OC);
+    String[] ocsStringArray = ocsString.split(" ");
+
+    String xocsString = rs.getString(BackendImpl.DN2ID_XOC);
+    String[] xocsStringArray = xocsString.split(" ");
+    if (xocsString.length() > 0) {
+      extensibleObject = true;
+    }
+    LinkedHashMap<ObjectClass, String> xObjectClasses =
+      new LinkedHashMap<ObjectClass, String>();
+
+    // Result sets are appended in the exact order decodeNDBEntry
+    // iterates them: non-abstract ocs, then xocs, then opattrs.
+    List<NdbResultSet> ocRsList = new ArrayList<NdbResultSet>();
+    Map<String, NdbBlob> blobMap = new HashMap<String, NdbBlob>();
+    LinkedHashMap<ObjectClass, String> objectClasses =
+      new LinkedHashMap<ObjectClass, String>(ocsStringArray.length);
+
+    NdbTransaction ndbDATxn = null;
+    NdbIndexScanOperation tagScanOp = null;
+
+    for (String ocName : ocsStringArray) {
+      ObjectClass oc =
+        DirectoryServer.getObjectClass(ocName, true);
+      objectClasses.put(oc, ocName);
+      // Abstract classes have no table of their own; no scan issued.
+      if (oc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+        continue;
+      }
+
+      // Lazily open the data transaction on the first concrete class.
+      // NOTE(review): if every objectclass is abstract, ndbDATxn stays
+      // null and the operational-attributes select below throws NPE --
+      // presumably entries always carry at least one structural class;
+      // confirm.
+      if (ndbDATxn == null) {
+        ndbDATxn = txn.getNdbDATransaction(ocName, eid);
+      }
+
+      // Scan this objectclass table for all rows (value mids) of eid.
+      indexScanOp =
+        ndbDATxn.getSelectIndexScanOperation(
+        PRIMARY_INDEX_NAME, ocName,
+        NdbOperation.LockMode.LM_CommittedRead);
+      indexScanOp.setBoundLong(BackendImpl.EID,
+        NdbIndexScanOperation.BoundType.BoundEQ, eid);
+      indexScanOp.getValue(BackendImpl.MID);
+
+      // Large attributes live in blob columns; fetch blob handles,
+      // everything else as plain values.
+      for (AttributeType reqAttr : oc.getRequiredAttributes()) {
+        String attrName = reqAttr.getNameOrOID();
+        if (BackendImpl.blobAttributes.contains(attrName)) {
+          NdbBlob blob = indexScanOp.getBlobHandle(attrName);
+          blobMap.put(attrName, blob);
+        } else {
+          indexScanOp.getValue(attrName);
+        }
+      }
+      for (AttributeType optAttr : oc.getOptionalAttributes()) {
+        String attrName = optAttr.getNameOrOID();
+        if (BackendImpl.blobAttributes.contains(attrName)) {
+          NdbBlob blob = indexScanOp.getBlobHandle(attrName);
+          blobMap.put(attrName, blob);
+        } else {
+          indexScanOp.getValue(attrName);
+        }
+      }
+      ocRsList.add(indexScanOp.resultData());
+    }
+
+    // Extensible object: same treatment as regular objectclasses.
+    if (extensibleObject) {
+      for (String xocName : xocsStringArray) {
+        ObjectClass xoc =
+          DirectoryServer.getObjectClass(xocName, true);
+        objectClasses.put(xoc, xocName);
+        xObjectClasses.put(xoc, xocName);
+        if (xoc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+          continue;
+        }
+
+        if (ndbDATxn == null) {
+          ndbDATxn = txn.getNdbDATransaction(xocName, eid);
+        }
+
+        indexScanOp =
+          ndbDATxn.getSelectIndexScanOperation(
+          PRIMARY_INDEX_NAME, xocName,
+          NdbOperation.LockMode.LM_CommittedRead);
+        indexScanOp.setBoundLong(BackendImpl.EID,
+          NdbIndexScanOperation.BoundType.BoundEQ, eid);
+        indexScanOp.getValue(BackendImpl.MID);
+
+        for (AttributeType reqAttr : xoc.getRequiredAttributes()) {
+          String attrName = reqAttr.getNameOrOID();
+          if (BackendImpl.blobAttributes.contains(attrName)) {
+            NdbBlob blob = indexScanOp.getBlobHandle(attrName);
+            blobMap.put(attrName, blob);
+          } else {
+            indexScanOp.getValue(attrName);
+          }
+        }
+        for (AttributeType optAttr : xoc.getOptionalAttributes()) {
+          String attrName = optAttr.getNameOrOID();
+          if (BackendImpl.blobAttributes.contains(attrName)) {
+            NdbBlob blob = indexScanOp.getBlobHandle(attrName);
+            blobMap.put(attrName, blob);
+          } else {
+            indexScanOp.getValue(attrName);
+          }
+        }
+        ocRsList.add(indexScanOp.resultData());
+      }
+    }
+
+    // Operational attributes: one row keyed by eid.
+    op = ndbDATxn.getSelectOperation(BackendImpl.OPATTRS_TABLE,
+      NdbOperation.LockMode.LM_CommittedRead);
+    op.equalLong(BackendImpl.EID, eid);
+
+    for (String attrName : BackendImpl.operationalAttributes) {
+      op.getValue(attrName);
+    }
+    ocRsList.add(op.resultData());
+
+    // Attribute options: all tag rows for this entry ID.
+    tagScanOp = ndbDATxn.getSelectIndexScanOperation(
+      PRIMARY_INDEX_NAME,
+      BackendImpl.TAGS_TABLE,
+      NdbOperation.LockMode.LM_CommittedRead);
+    tagScanOp.setBoundLong(BackendImpl.EID,
+      NdbIndexScanOperation.BoundType.BoundEQ, eid);
+    tagScanOp.getValue(BackendImpl.TAG_ATTR);
+    tagScanOp.getValue(BackendImpl.MID);
+    tagScanOp.getValue(BackendImpl.TAG_TAGS);
+    NdbResultSet tagRs = tagScanOp.resultData();
+
+    // Single round trip executes every operation batched above.
+    ndbDATxn.execute(ExecType.NoCommit, AbortOption.AO_IgnoreError, true);
+
+    return decodeNDBEntry(dn, ocRsList, tagRs, objectClasses,
+      xObjectClasses, blobMap, extensibleObject);
+  }
+
+  /**
+   * Decode the entry from NDB results.
+   * @param dn The entry DN.
+   * @param ocRsList ObjectClass result sets list, one per non-abstract
+   *        objectclass (in objectClasses iteration order), followed by
+   *        the operational attributes result set.
+   * @param tagRs Attribute tags result set.
+   * @param objectClasses ObjectClasses map.
+   * @param xObjectClasses Extensible ObjectClasses map.
+   * @param blobMap Blob attributes map.
+   * @param extensibleObject true if the entry is Extensible Object,
+   * false otherwise.
+   * @return The entry.
+   * @throws NdbApiException If an error occurs in the database.
+   */
+  private Entry decodeNDBEntry(
+    DN dn,
+    List<NdbResultSet> ocRsList,
+    NdbResultSet tagRs,
+    Map<ObjectClass, String> objectClasses,
+    Map<ObjectClass, String> xObjectClasses,
+    Map<String, NdbBlob> blobMap,
+    boolean extensibleObject) throws NdbApiException
+  {
+    LinkedHashMap<AttributeType, List<Attribute>> userAttributes =
+      new LinkedHashMap<AttributeType, List<Attribute>>();
+    LinkedHashMap<AttributeType, List<Attribute>> opAttributes =
+      new LinkedHashMap<AttributeType, List<Attribute>>();
+
+    // Attribute options: attribute name -> (value mid -> option set).
+    Map<String, Map<Integer, LinkedHashSet<String>>> attr2tagMap =
+      decodeAttributeTags(tagRs);
+
+    // Object classes and user attributes. The result sets in ocRsList
+    // are in the same order as the non-abstract classes iterated here.
+    Iterator<NdbResultSet> ocRsIterator = ocRsList.iterator();
+    NdbResultSet ocRs = ocRsIterator.next();
+    for (ObjectClass oc : objectClasses.keySet()) {
+      // Abstract classes have no table of their own and no result set.
+      if (oc.getObjectClassType() == ObjectClassType.ABSTRACT) {
+        continue;
+      }
+      while (ocRs.next()) {
+        int mid = ocRs.getInt(BackendImpl.MID);
+        decodeUserAttributes(oc.getRequiredAttributes(), ocRs, mid,
+          attr2tagMap, blobMap, userAttributes);
+        decodeUserAttributes(oc.getOptionalAttributes(), ocRs, mid,
+          attr2tagMap, blobMap, userAttributes);
+      }
+      if (ocRsIterator.hasNext()) {
+        ocRs = ocRsIterator.next();
+      }
+    }
+
+    // Operational attributes: single row in the last result set.
+    if (ocRs.next()) {
+      AttributeBuilder attrBuilder = new AttributeBuilder();
+      for (String attrName : BackendImpl.operationalAttributes) {
+        byte[] attrValBytes = ocRs.getStringBytes(attrName);
+        if (ocRs.wasNull()) {
+          continue;
+        }
+        AttributeType attributeType =
+          DirectoryServer.getAttributeType(
+          BackendImpl.attrName2LC.get(attrName), true);
+        attrBuilder.setAttributeType(attributeType, attrName);
+        attrBuilder.add(AttributeValues.create(attributeType,
+          ByteString.wrap(attrValBytes)));
+        Attribute attr = attrBuilder.toAttribute();
+        List<Attribute> attrList = opAttributes.get(attributeType);
+        if (attrList == null) {
+          attrList = new ArrayList<Attribute>();
+          opAttributes.put(attributeType, attrList);
+        }
+        attrList.add(attr);
+      }
+    }
+
+    // Extensible objectclasses are decoded like regular ones above but
+    // must not appear in the entry's objectclass map.
+    if (extensibleObject) {
+      for (ObjectClass oc : xObjectClasses.keySet()) {
+        objectClasses.remove(oc);
+      }
+    }
+
+    // new Entry(...) can never return null, so no null check is needed.
+    Entry entry = new Entry(dn, objectClasses, userAttributes, opAttributes);
+    entry.processVirtualAttributes();
+    return entry;
+  }
+
+  /**
+   * Decode attribute options (tags) from the tags result set.
+   * Options are stored as a ";"-separated list (e.g. ";binary;lang-en").
+   * @param tagRs Attribute tags result set.
+   * @return Map from attribute name to a map from value mid to that
+   *         value's attribute option set.
+   * @throws NdbApiException If an error occurs in the database.
+   */
+  private Map<String, Map<Integer, LinkedHashSet<String>>>
+    decodeAttributeTags(NdbResultSet tagRs) throws NdbApiException
+  {
+    Map<String, Map<Integer, LinkedHashSet<String>>> attr2tagMap =
+      new HashMap<String, Map<Integer, LinkedHashSet<String>>>();
+    while (tagRs.next()) {
+      String attrName = tagRs.getString(BackendImpl.TAG_ATTR);
+      int mid = tagRs.getInt(BackendImpl.MID);
+      String attrOptions = tagRs.getString(BackendImpl.TAG_TAGS);
+      if (tagRs.wasNull()) {
+        // Row carries no options; nothing to record.
+        continue;
+      }
+      Map<Integer, LinkedHashSet<String>> mid2tagMap =
+        attr2tagMap.get(attrName);
+      if (mid2tagMap == null) {
+        mid2tagMap = new HashMap<Integer, LinkedHashSet<String>>();
+      }
+      LinkedHashSet<String> options = new LinkedHashSet<String>();
+      int currentIndex = attrOptions.indexOf(';');
+      int nextIndex = attrOptions.indexOf(';', currentIndex + 1);
+      while (nextIndex > 0) {
+        String option =
+          attrOptions.substring(currentIndex + 1, nextIndex);
+        if (option.length() > 0) {
+          options.add(option);
+        }
+        currentIndex = nextIndex;
+        nextIndex = attrOptions.indexOf(';', currentIndex + 1);
+      }
+      // Trailing (or only) option after the last separator.
+      String option = attrOptions.substring(currentIndex + 1);
+      if (option.length() > 0) {
+        options.add(option);
+      }
+      mid2tagMap.put(mid, options);
+      attr2tagMap.put(attrName, mid2tagMap);
+    }
+    return attr2tagMap;
+  }
+
+  /**
+   * Decode the values of the given attribute types from one objectclass
+   * row and merge them into the user attributes map. Shared by the
+   * required- and optional-attribute passes, which were previously
+   * duplicated inline.
+   * @param attrTypes Attribute types declared by the objectclass.
+   * @param ocRs Current objectclass result set, positioned on a row.
+   * @param mid The value mid of the current row, used to look up
+   *        per-value attribute options.
+   * @param attr2tagMap Attribute options decoded from the tags table.
+   * @param blobMap Blob attributes map.
+   * @param userAttributes Map the decoded attributes are merged into.
+   * @throws NdbApiException If an error occurs in the database.
+   */
+  private void decodeUserAttributes(
+    Set<AttributeType> attrTypes,
+    NdbResultSet ocRs,
+    int mid,
+    Map<String, Map<Integer, LinkedHashSet<String>>> attr2tagMap,
+    Map<String, NdbBlob> blobMap,
+    Map<AttributeType, List<Attribute>> userAttributes)
+    throws NdbApiException
+  {
+    AttributeBuilder attrBuilder = new AttributeBuilder();
+    for (AttributeType ocAttr : attrTypes) {
+      String attrName = ocAttr.getNameOrOID();
+      byte[] attrValBytes = null;
+      NdbBlob blob = null;
+      if (BackendImpl.blobAttributes.contains(attrName)) {
+        blob = blobMap.get(attrName);
+      } else {
+        attrValBytes = ocRs.getStringBytes(attrName);
+        if (ocRs.wasNull()) {
+          continue;
+        }
+      }
+      AttributeType attributeType =
+        DirectoryServer.getAttributeType(
+        BackendImpl.attrName2LC.get(attrName), true);
+      List<Attribute> attrList = userAttributes.get(attributeType);
+      if (attrList == null) {
+        attrList = new ArrayList<Attribute>();
+      }
+      // When this value carries no options, extend the last attribute;
+      // otherwise build a fresh attribute carrying the options.
+      Attribute attr = null;
+      LinkedHashSet<String> options = null;
+      Map<Integer, LinkedHashSet<String>> mid2tagMap =
+        attr2tagMap.get(attrName);
+      if (mid2tagMap != null) {
+        options = mid2tagMap.get(mid);
+      }
+      if ((options == null) && !attrList.isEmpty()) {
+        attr = attrList.get(attrList.size() - 1);
+      }
+      if (attr == null) {
+        attrBuilder.setAttributeType(attributeType, attrName);
+      } else {
+        attrBuilder = new AttributeBuilder(attr);
+      }
+      if (blob != null) {
+        if (blob.getNull()) {
+          continue;
+        }
+        int len = blob.getLength().intValue();
+        byte[] buf = new byte[len];
+        blob.readData(buf, len);
+        attrBuilder.add(AttributeValues.create(attributeType,
+          ByteString.wrap(buf)));
+      } else {
+        attrBuilder.add(AttributeValues.create(attributeType,
+          ByteString.wrap(attrValBytes)));
+      }
+      if (options != null) {
+        attrBuilder.setOptions(options);
+      }
+      attr = attrBuilder.toAttribute();
+      // NOTE(review): preserved from the original -- when a fresh
+      // optioned attribute is built and attrList is non-empty, the new
+      // attribute replaces the last element rather than being appended;
+      // confirm this replacement (not accumulation) is intended.
+      if (attrList.isEmpty()) {
+        attrList.add(attr);
+      } else {
+        attrList.set(attrList.size() - 1, attr);
+      }
+      userAttributes.put(attributeType, attrList);
+    }
+  }
+
+  /**
+   * Fetch the entry ID for a given DN.
+   * @param txn Abstract transaction to be used for the database read.
+   * @param dn The DN for which the entry ID is desired.
+   * @param lockMode The lock mode for this operation.
+   * @return The entry ID, or zero if the given DN is not in the database.
+   * @throws NdbApiException If an error occurs in the database.
+   */
+  public long getID(AbstractTransaction txn, DN dn,
+    NdbOperation.LockMode lockMode)
+       throws NdbApiException
+  {
+    NdbOperation op = null;
+    NdbResultSet rs = null;
+    long eid = 0;
+
+    NdbTransaction ndbTxn = txn.getNdbTransaction();
+
+    // Primary-key read on the dn2id table, one RDN per DN column.
+    op = ndbTxn.getSelectOperation(name, lockMode);
+
+    // Bind the DN components suffix first (column 0 holds the last
+    // RDN), then pad every remaining DN column with the empty string.
+    // NOTE(review): same boundary case as get(txn, dn, lockMode) --
+    // when dn.getNumComponents() == DN2ID_DN_NC the trailing
+    // equalString targets column index DN2ID_DN_NC, one past the last
+    // DN column; confirm DNs never reach the column limit.
+    int componentIndex = dn.getNumComponents() - 1;
+    for (int i=0; i < BackendImpl.DN2ID_DN_NC; i++) {
+      while (componentIndex >= 0) {
+        op.equalString(BackendImpl.DN2ID_DN + Integer.toString(i),
+          dn.getRDN(componentIndex).toNormalizedString());
+        componentIndex--;
+        i++;
+      }
+      op.equalString(BackendImpl.DN2ID_DN +
+        Integer.toString(i), "");
+    }
+
+    op.getValue(BackendImpl.EID);
+
+    rs = op.resultData();
+    ndbTxn.execute(ExecType.NoCommit, AbortOption.AO_IgnoreError, true);
+
+    // No matching row leaves eid at its zero default.
+    if (rs.next()) {
+      eid = rs.getLong(BackendImpl.EID);
+    }
+
+    return eid;
+  }
+
+  /**
+   * Get referrals for a given entry ID.
+   * @param txn Abstract transaction to be used for the operation.
+   * @param id The ID for which the referral is desired.
+   * @return The referral set, or empty set if the entry has no referrals.
+   * @throws NdbApiException If an error occurs in the database.
+   */
+  public Set<String> getReferrals(AbstractTransaction txn, long id)
+       throws NdbApiException
+  {
+    Set<String> referrals = new HashSet<String>();
+
+    NdbTransaction ndbDATxn =
+      txn.getNdbDATransaction(BackendImpl.REFERRALS_TABLE, id);
+
+    // Scan all referral rows stored under this entry ID.
+    NdbIndexScanOperation scanOp = ndbDATxn.getSelectIndexScanOperation(
+      PRIMARY_INDEX_NAME, BackendImpl.REFERRALS_TABLE,
+      NdbOperation.LockMode.LM_CommittedRead);
+    scanOp.setBoundLong(BackendImpl.EID,
+      NdbIndexScanOperation.BoundType.BoundEQ, id);
+    scanOp.getValue(ATTR_REFERRAL_URL);
+
+    NdbResultSet rs = scanOp.resultData();
+    ndbDATxn.execute(ExecType.NoCommit, AbortOption.AO_IgnoreError, true);
+
+    while (rs.next()) {
+      String referral = rs.getString(ATTR_REFERRAL_URL);
+      // A null or empty URL terminates the referral list.
+      if (rs.wasNull() || (referral.length() == 0)) {
+        break;
+      }
+      referrals.add(referral);
+    }
+
+    return referrals;
+  }
+
+  /**
+   * Get the count of rows in the database.
+   * @return The number of rows in the database.
+   * @throws NdbApiException If an error occurs in the database.
+   */
+  public long getRecordCount() throws NdbApiException
+  {
+    RootContainer rootContainer = entryContainer.getRootContainer();
+    Ndb ndb = rootContainer.getNDB();
+    try {
+      return ndb.selectCount(name);
+    } finally {
+      // Always hand the Ndb object back to the root container.
+      if (ndb != null) {
+        rootContainer.releaseNDB(ndb);
+      }
+    }
+  }
+
+  /**
+   * Determine if the entry has subordinate entries.
+   * @param txn Abstract transaction to be used for the operation.
+   * @param dn The entry DN.
+   * @return true if the entry has subordinates, false otherwise.
+   * @throws com.mysql.cluster.ndbj.NdbApiException If an error
+   * occurs in the database.
+   */
+  public boolean hasSubordinates(AbstractTransaction txn, DN dn)
+    throws NdbApiException
+  {
+    NdbTransaction ndbTxn = txn.getNdbTransaction();
+
+    NdbIndexScanOperation scanOp = ndbTxn.getSelectIndexScanOperation(
+      PRIMARY_INDEX_NAME, name,
+      NdbOperation.LockMode.LM_Read);
+
+    // Bind every RDN component of the entry DN, suffix first: column 0
+    // holds the last DN component, column numComponents-1 the first.
+    int numComponents = dn.getNumComponents();
+    for (int col = 0; col < numComponents; col++) {
+      scanOp.setBoundString(BackendImpl.DN2ID_DN +
+        Integer.toString(col),
+        NdbIndexScanOperation.BoundType.BoundEQ,
+        dn.getRDN(numComponents - 1 - col).toNormalizedString());
+    }
+
+    // Subordinates are rows with something in the next component column.
+    if (numComponents < BackendImpl.DN2ID_DN_NC) {
+      String nextRDNColumn =
+        BackendImpl.DN2ID_DN + Integer.toString(numComponents);
+      scanOp.setBoundString(nextRDNColumn,
+        NdbIndexScanOperation.BoundType.BoundLT, "");
+    }
+
+    // FIXME: This is extremely inefficient, need NDB/J API
+    // like interpretExitLastRow to count result rows node-
+    // side without returning them here for check/count.
+    scanOp.getValue(BackendImpl.EID);
+
+    NdbResultSet rs = scanOp.resultData();
+    ndbTxn.execute(ExecType.NoCommit, AbortOption.AO_IgnoreError, true);
+
+    // Any returned row at all means at least one subordinate exists.
+    return rs.next();
+  }
+
+  /**
+   * Get a new instance of the Search Cursor object.
+   * @param txn Abstract Transaction to be used for the operation.
+   * @param baseDN Search Cursor base DN.
+   * @return New instance of the Search Cursor object.
+   */
+  public DN2IDSearchCursor getSearchCursor(
+    AbstractTransaction txn, DN baseDN)
+  {
+    // Construction only records the arguments; the caller opens it.
+    return new DN2IDSearchCursor(txn, baseDN);
+  }
+
+  /**
+   * This inner class represents the Search Cursor which can be
+   * used to cursor entries in the database starting from some
+   * arbitrary base DN.
+   */
+  protected class DN2IDSearchCursor
+  {
+    // Active scan operation, created by open().
+    private NdbIndexScanOperation op;
+
+    // Scan result set produced by open().
+    private NdbResultSet rs;
+
+    // Underlying NDB transaction, obtained from txn by open().
+    private NdbTransaction ndbTxn;
+
+    // Abstract transaction supplied by the caller.
+    private AbstractTransaction txn;
+
+    // Base DN at which the cursor starts.
+    private DN baseDN;
+
+    /**
+     * Object constructor.
+     * @param txn Abstract Transaction to be used for the operation.
+     * @param baseDN Search Cursor base DN.
+     */
+    public DN2IDSearchCursor(
+      AbstractTransaction txn,
+      DN baseDN)
+    {
+      this.txn = txn;
+      this.baseDN = baseDN;
+    }
+
+    /**
+     * Open the cursor.
+     * @throws com.mysql.cluster.ndbj.NdbApiException If an error
+     * occurs in the database.
+     */
+    public void open() throws NdbApiException
+    {
+      ndbTxn = txn.getNdbTransaction();
+
+      op = ndbTxn.getSelectIndexScanOperation(
+        PRIMARY_INDEX_NAME, name, NdbOperation.LockMode.LM_CommittedRead);
+
+      // Bind the base DN components, suffix first: column 0 holds the
+      // last DN component, column numComponents-1 the first.
+      int numComponents = baseDN.getNumComponents();
+      for (int col = 0; col < numComponents; col++) {
+        op.setBoundString(BackendImpl.DN2ID_DN +
+          Integer.toString(col),
+          NdbIndexScanOperation.BoundType.BoundEQ,
+          baseDN.getRDN(numComponents - 1 - col).toNormalizedString());
+      }
+
+      // Restrict the scan to rows below the base DN.
+      if (numComponents < BackendImpl.DN2ID_DN_NC) {
+        String nextRDNColumn =
+          BackendImpl.DN2ID_DN + Integer.toString(numComponents);
+        op.setBoundString(nextRDNColumn,
+          NdbIndexScanOperation.BoundType.BoundLT, "");
+      }
+
+      // Fetch the entry ID and every DN component column.
+      op.getValue(BackendImpl.EID);
+      for (int col = 0; col < BackendImpl.DN2ID_DN_NC; col++) {
+        op.getValue(BackendImpl.DN2ID_DN + Integer.toString(col));
+      }
+
+      rs = op.resultData();
+      ndbTxn.execute(ExecType.NoCommit, AbortOption.AbortOnError, true);
+    }
+
+    /**
+     * Advance one position and return the result.
+     * @return An instance of Search Cursor Result, or null when the
+     *         scan is exhausted or the row is incomplete.
+     * @throws com.mysql.cluster.ndbj.NdbApiException If an error
+     * occurs in the database.
+     */
+    public SearchCursorResult getNext() throws NdbApiException
+    {
+      if (!rs.next()) {
+        return null;
+      }
+
+      long eid = rs.getLong(BackendImpl.EID);
+
+      // Reassemble the DN string from its per-column RDN pieces,
+      // highest column index first; empty columns are skipped.
+      StringBuilder dnBuilder = new StringBuilder();
+      for (int col = BackendImpl.DN2ID_DN_NC - 1; col >= 0; col--) {
+        String rdn = rs.getString(BackendImpl.DN2ID_DN +
+          Integer.toString(col));
+        if (rdn.length() > 0) {
+          dnBuilder.append(rdn);
+          if (col > 0) {
+            dnBuilder.append(",");
+          }
+        }
+      }
+      String dnString = dnBuilder.toString();
+
+      if ((eid == 0) || (dnString.length() == 0)) {
+        return null;
+      }
+
+      return new SearchCursorResult(dnString, eid);
+    }
+
+    /**
+     * Close the cursor.
+     */
+    public void close()
+    {
+      // Drop the transaction references; the transactions themselves
+      // are owned and finished by the caller.
+      ndbTxn = null;
+      txn = null;
+    }
+  }
+
+  /**
+   * This inner class represents a Search Cursor Result
+   * as returned by the Search Cursor operations.
+   */
+  protected class SearchCursorResult
+  {
+    /** Entry DN. */
+    public String dn;
+
+    /** Entry ID. */
+    public long id;
+
+    /**
+     * Object constructor.
+     * @param dn The entry DN.
+     * @param id The entry ID.
+     */
+    public SearchCursorResult(String dn, long id)
+    {
+      this.id = id;
+      this.dn = dn;
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/RootContainer.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/RootContainer.java
new file mode 100644
index 0000000..a65c119
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/RootContainer.java
@@ -0,0 +1,499 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb;
+import com.mysql.cluster.ndbj.Ndb;
+import com.mysql.cluster.ndbj.NdbApiException;
+import com.mysql.cluster.ndbj.NdbApiTemporaryException;
+import com.mysql.cluster.ndbj.NdbClusterConnection;
+import org.opends.messages.Message;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.*;
+import java.util.concurrent.LinkedBlockingQueue;
+import org.opends.server.types.DN;
+import org.opends.server.types.ConfigChangeResult;
+import org.opends.server.types.ResultCode;
+import org.opends.server.api.Backend;
+import org.opends.server.admin.server.ConfigurationChangeListener;
+import org.opends.server.admin.std.server.NdbBackendCfg;
+import org.opends.server.config.ConfigException;
+import static org.opends.server.loggers.ErrorLogger.logError;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.messages.NdbMessages.*;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.DebugLogLevel;
+
+/**
+ * Root container holds all the entry containers for each base DN.
+ * It also maintains all the openings and closings of the entry
+ * containers.
+ */
+public class RootContainer
+     implements ConfigurationChangeListener<NdbBackendCfg>
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+  /**
+   * The backend configuration.
+   */
+  private NdbBackendCfg config;
+
+  /**
+   * The backend to which this entry root container belongs.
+   */
+  private Backend backend;
+
+  /**
+   * The base DNs contained in this entryContainer.
+   */
+  private ConcurrentHashMap<DN, EntryContainer> entryContainers;
+
+  /**
+   * NDB connection objects.
+   */
+  private static NdbClusterConnection[] ndbConns;
+
+  /**
+   * NDB handle objects.
+   */
+  private static LinkedBlockingQueue<Ndb> ndbQueue;
+
+  /**
+   * NDB thread count.
+   */
+  private int ndbThreadCount;
+
+  /**
+   * NDB number of connections.
+   */
+  private int ndbNumConnections;
+
+  /**
+   * The range to use when requesting next ID.
+   */
+  private static final long NDB_NEXTID_RANGE = 1000;
+
+  /**
+   * The maximum number of NDB threads.
+   */
+  private static final int NDB_MAX_THREAD_COUNT = 128;
+
+  /**
+   * Timeout for the first node/group to become ready.
+   */
+  private static final int NDB_TIMEOUT_FIRST_ALIVE = 60;
+
+  /**
+   * Timeout for the rest of nodes/groups to become ready.
+   */
+  private static final int NDB_TIMEOUT_AFTER_FIRST_ALIVE = 60;
+
+
+
+  /**
+   * Creates a new RootContainer object.
+   *
+   * @param config The configuration of the NDB backend.
+   * @param backend A reference to the NDB backend that is creating this
+   *                root container.
+   */
+  public RootContainer(Backend backend, NdbBackendCfg config)
+  {
+    this.entryContainers = new ConcurrentHashMap<DN, EntryContainer>();
+    this.backend = backend;
+    this.config = config;
+
+    this.ndbNumConnections = this.config.getNdbNumConnections();
+    this.ndbConns = new NdbClusterConnection[ndbNumConnections];
+
+    this.ndbThreadCount = this.config.getNdbThreadCount();
+    if (this.ndbThreadCount > NDB_MAX_THREAD_COUNT) {
+      this.ndbThreadCount = NDB_MAX_THREAD_COUNT;
+    }
+
+    this.ndbQueue = new LinkedBlockingQueue<Ndb>(
+      this.ndbThreadCount);
+
+    config.addNdbChangeListener(this);
+  }
+
+  /**
+   * Opens the root container using the NDB configuration object provided.
+   *
+   * @throws NdbApiException If an error occurs when opening.
+   * @throws ConfigException If an configuration error occurs while opening.
+   * @throws Exception If an unknown error occurs when opening.
+   */
+  public void open()
+      throws NdbApiException, ConfigException, Exception
+  {
+    // Log a message indicating upcoming NDB connect.
+    logError(NOTE_NDB_WAITING_FOR_CLUSTER.get());
+
+    // Connect to the cluster.
+    for (int i = 0; i < this.ndbNumConnections; i++) {
+      try {
+        this.ndbConns[i] = NdbClusterConnection.create(
+          this.config.getNdbConnectString());
+        this.ndbConns[i].connect(5, 3, true);
+        this.ndbConns[i].waitUntilReady(NDB_TIMEOUT_FIRST_ALIVE,
+          NDB_TIMEOUT_AFTER_FIRST_ALIVE);
+      } catch (NdbApiTemporaryException e) {
+        if (debugEnabled()) {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+        // Retry.
+        if (this.ndbConns[i] != null) {
+          this.ndbConns[i].close();
+          this.ndbConns[i] = null;
+        }
+        i--;
+        continue;
+      }
+    }
+
+    // Get NDB objects.
+    int connsIndex = 0;
+    for (int i = 0; i < this.ndbThreadCount; i++) {
+      Ndb ndb = ndbConns[connsIndex].createNdb(
+        BackendImpl.DATABASE_NAME, 1024);
+      connsIndex++;
+      if (connsIndex >= this.ndbNumConnections) {
+        connsIndex = 0;
+      }
+      try {
+        this.ndbQueue.put(ndb);
+      } catch (Exception e) {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+        if (ndb != null) {
+          ndb.close();
+        }
+      }
+    }
+
+    openAndRegisterEntryContainers(config.getBaseDN());
+  }
+
+  /**
+   * Opens the entry container for a base DN. If the entry container does not
+   * exist for the base DN, it will be created. The entry container will be
+   * opened with the same mode as the root container. Any entry containers
+   * opened in a read only root container will also be read only. Any entry
+   * containers opened in a non transactional root container will also be non
+   * transactional.
+   *
+   * @param baseDN The base DN of the entry container to open.
+   * @return The opened entry container.
+   * @throws NdbApiException If an error occurs while opening the entry
+   *                           container.
+   * @throws ConfigException If an configuration error occurs while opening
+   *                         the entry container.
+   */
+  public EntryContainer openEntryContainer(DN baseDN)
+      throws NdbApiException, ConfigException
+  {
+    String databasePrefix = baseDN.toNormalizedString();
+
+    EntryContainer ec = new EntryContainer(baseDN, databasePrefix,
+                                           backend, config, this);
+    ec.open();
+    return ec;
+  }
+
+  /**
+   * Registeres the entry container for a base DN.
+   *
+   * @param baseDN The base DN of the entry container to register.
+   * @param entryContainer The entry container to register for the baseDN.
+   * @throws Exception If an error occurs while registering the entry
+   *                           container.
+   */
+  public void registerEntryContainer(DN baseDN,
+                                     EntryContainer entryContainer)
+      throws Exception
+  {
+    EntryContainer ec1 = this.entryContainers.get(baseDN);
+
+    // If an entry container for this baseDN is already open we don't allow
+    // another to be opened.
+    if (ec1 != null)
+      // FIXME: Should be NDBException instance.
+      throw new Exception("An entry container named " +
+          ec1.getDatabasePrefix() + " is alreadly registered for base DN " +
+          baseDN.toString());
+
+    this.entryContainers.put(baseDN, entryContainer);
+  }
+
+  /**
+   * Opens the entry containers for multiple base DNs.
+   *
+   * @param baseDNs The base DNs of the entry containers to open.
+   * @throws NdbApiException If an error occurs while opening the entry
+   *                           container.
+   * @throws ConfigException if a configuration error occurs while opening the
+   *                         container.
+   */
+  private void openAndRegisterEntryContainers(Set<DN> baseDNs)
+      throws NdbApiException, ConfigException, Exception
+  {
+    for(DN baseDN : baseDNs)
+    {
+      EntryContainer ec = openEntryContainer(baseDN);
+      registerEntryContainer(baseDN, ec);
+    }
+  }
+
+  /**
+   * Unregisteres the entry container for a base DN.
+   *
+   * @param baseDN The base DN of the entry container to close.
+   * @return The entry container that was unregistered or NULL if a entry
+   * container for the base DN was not registered.
+   */
+  public EntryContainer unregisterEntryContainer(DN baseDN)
+  {
+    return entryContainers.remove(baseDN);
+
+  }
+
+  /**
+   * Close the root entryContainer.
+   *
+   * @throws NdbApiException If an error occurs while attempting to close
+   * the entryContainer.
+   */
+  public void close() throws NdbApiException
+  {
+    for(DN baseDN : entryContainers.keySet())
+    {
+      EntryContainer ec = unregisterEntryContainer(baseDN);
+      ec.exclusiveLock.lock();
+      try
+      {
+        ec.close();
+      }
+      finally
+      {
+        ec.exclusiveLock.unlock();
+      }
+    }
+
+    while (!this.ndbQueue.isEmpty()) {
+      Ndb ndb = null;
+      try {
+        ndb = this.ndbQueue.poll();
+        if (ndb != null) {
+          ndb.close();
+        }
+      } catch (Exception e) {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+      }
+    }
+    this.ndbQueue.clear();
+
+    for (NdbClusterConnection ndbConn : ndbConns) {
+      ndbConn.close();
+    }
+
+    config.removeNdbChangeListener(this);
+  }
+
+  /**
+   * Get NDB handle from the queue.
+   * @return NDB handle.
+   */
+  protected Ndb getNDB()
+  {
+    try {
+      return ndbQueue.take();
+    } catch (Exception e) {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      return null;
+    }
+  }
+
+  /**
+   * Release NDB handle to the queue.
+   * @param ndb handle to release.
+   */
+  protected void releaseNDB(Ndb ndb)
+  {
+    try {
+      ndbQueue.put(ndb);
+    } catch (Exception e) {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      if (ndb != null) {
+        ndb.close();
+      }
+      return;
+    }
+  }
+
+  /**
+   * Return all the entry containers in this root container.
+   *
+   * @return The entry containers in this root container.
+   */
+  public Collection<EntryContainer> getEntryContainers()
+  {
+    return entryContainers.values();
+  }
+
+  /**
+   * Returns all the baseDNs this root container stores.
+   *
+   * @return The set of DNs this root container stores.
+   */
+  public Set<DN> getBaseDNs()
+  {
+    return entryContainers.keySet();
+  }
+
+  /**
+   * Return the entry container for a specific base DN.
+   *
+   * @param baseDN The base DN of the entry container to retrive.
+   * @return The entry container for the base DN.
+   */
+  public EntryContainer getEntryContainer(DN baseDN)
+  {
+    EntryContainer ec = null;
+    DN nodeDN = baseDN;
+
+    while (ec == null && nodeDN != null)
+    {
+      ec = entryContainers.get(nodeDN);
+      if (ec == null)
+      {
+        nodeDN = nodeDN.getParentDNInSuffix();
+      }
+    }
+
+    return ec;
+  }
+
+  /**
+   * Get the backend configuration used by this root container.
+   *
+   * @return The NDB backend configuration used by this root container.
+   */
+  public NdbBackendCfg getConfiguration()
+  {
+    return config;
+  }
+
+  /**
+   * Get the total number of entries in this root container.
+   *
+   * @return The number of entries in this root container
+   * @throws NdbApiException If an error occurs while retrieving the entry
+   *                           count.
+   */
+  public long getEntryCount() throws NdbApiException
+  {
+    long entryCount = 0;
+    for(EntryContainer ec : this.entryContainers.values())
+    {
+      ec.sharedLock.lock();
+      try
+      {
+        entryCount += ec.getEntryCount();
+      }
+      finally
+      {
+        ec.sharedLock.unlock();
+      }
+    }
+
+    return entryCount;
+  }
+
+  /**
+   * Assign the next entry ID.
+   * @param ndb Ndb handle.
+   * @return The assigned entry ID.
+   */
+  public long getNextEntryID(Ndb ndb)
+  {
+    long eid = 0;
+    try
+    {
+      eid = ndb.getAutoIncrementValue(BackendImpl.NEXTID_TABLE,
+        NDB_NEXTID_RANGE);
+    }
+    catch (NdbApiException e)
+    {
+      if (debugEnabled())
+      {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+    }
+
+    return eid;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public boolean isConfigurationChangeAcceptable(
+      NdbBackendCfg cfg,
+      List<Message> unacceptableReasons)
+  {
+    boolean acceptable = true;
+
+    return acceptable;
+  }
+
+  /**
+   * {@inheritDoc}
+   */
+  public ConfigChangeResult applyConfigurationChange(NdbBackendCfg cfg)
+  {
+    ConfigChangeResult ccr;
+    boolean adminActionRequired = false;
+    ArrayList<Message> messages = new ArrayList<Message>();
+
+    ccr = new ConfigChangeResult(ResultCode.SUCCESS, adminActionRequired,
+                                 messages);
+    return ccr;
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/DNContext.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/DNContext.java
new file mode 100644
index 0000000..bf6cf20
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/DNContext.java
@@ -0,0 +1,363 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.backends.ndb.importLDIF;
+
+import org.opends.server.types.DN;
+import org.opends.server.types.LDIFImportConfig;
+import org.opends.server.util.LDIFReader;
+import org.opends.server.backends.ndb.*;
+
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.*;
+import org.opends.server.admin.std.server.NdbBackendCfg;
+
+/**
+ * This class represents the import context for a destination base DN.
+ */
+public class DNContext {
+
+  /**
+   * The destination base DN.
+   */
+  private DN baseDN;
+
+  /**
+   * The include branches below the base DN.
+   */
+  private List<DN> includeBranches;
+
+  /**
+   * The exclude branches below the base DN.
+   */
+  private List<DN> excludeBranches;
+
+  /**
+   * The configuration of the destination backend.
+   */
+  private NdbBackendCfg config;
+
+  /**
+   * The requested LDIF import configuration.
+   */
+  private LDIFImportConfig ldifImportConfig;
+
+  /**
+   * A reader for the source LDIF file.
+   */
+  private LDIFReader ldifReader;
+
+  /**
+   * The entry entryContainer for the destination base DN.
+   */
+  private EntryContainer entryContainer;
+
+  /**
+   * The source entryContainer if this is a partial import of a base DN.
+   */
+  private EntryContainer srcEntryContainer;
+
+  /**
+   * A queue of elements that have been read from the LDIF and are ready
+   * to be imported.
+   */
+
+  private BlockingQueue<WorkElement> workQueue;
+
+  /**
+   * Map of pending DNs added to the work queue. Used to check if a parent
+   * entry has been added, but isn't in the database.
+   */
+  private ConcurrentHashMap<DN, DN> pendingMap =
+    new ConcurrentHashMap<DN, DN>() ;
+
+  /**
+   * The number of LDAP entries added to the database, used to update the
+   * entry database record count after import.  The value is not updated
+   * for replaced entries.  Multiple threads may be updating this value.
+   */
+  private AtomicLong entryInsertCount = new AtomicLong(0);
+
+  /**
+   * The parent DN of the previous imported entry.
+   */
+  private DN parentDN;
+
+
+  /**
+   * Get the work queue.
+   *
+   * @return  The work queue.
+   */
+  public BlockingQueue<WorkElement> getWorkQueue() {
+      return workQueue;
+    }
+
+
+  /**
+   * Set the work queue to the specified work queue.
+   *
+   * @param workQueue The work queue.
+   */
+  public void
+   setWorkQueue(BlockingQueue<WorkElement> workQueue) {
+    this.workQueue = workQueue;
+  }
+
+  /**
+   * Set the destination base DN.
+   * @param baseDN The destination base DN.
+   */
+  public void setBaseDN(DN baseDN)
+  {
+    this.baseDN = baseDN;
+  }
+
+  /**
+   * Get the destination base DN.
+   * @return The destination base DN.
+   */
+  public DN getBaseDN()
+  {
+    return baseDN;
+  }
+
+  /**
+   * Set the configuration of the destination backend.
+   * @param config The destination backend configuration.
+   */
+  public void setConfig(NdbBackendCfg config)
+  {
+    this.config = config;
+  }
+
+  /**
+   * Get the configuration of the destination backend.
+   * @return The destination backend configuration.
+   */
+  public NdbBackendCfg getConfig()
+  {
+    return config;
+  }
+
+  /**
+   * Set the requested LDIF import configuration.
+   * @param ldifImportConfig The LDIF import configuration.
+   */
+  public void setLDIFImportConfig(LDIFImportConfig ldifImportConfig)
+  {
+    this.ldifImportConfig = ldifImportConfig;
+  }
+
+  /**
+   * Get the requested LDIF import configuration.
+   * @return The requested LDIF import configuration.
+   */
+  public LDIFImportConfig getLDIFImportConfig()
+  {
+    return ldifImportConfig;
+  }
+
+  /**
+   * Set the source LDIF reader.
+   * @param ldifReader The source LDIF reader.
+   */
+  public void setLDIFReader(LDIFReader ldifReader)
+  {
+    this.ldifReader = ldifReader;
+  }
+
+  /**
+   * Get the source LDIF reader.
+   * @return The source LDIF reader.
+   */
+  public LDIFReader getLDIFReader()
+  {
+    return ldifReader;
+  }
+
+  /**
+   * Set the entry entryContainer for the destination base DN.
+   * @param entryContainer The entry entryContainer for the destination base DN.
+   */
+  public void setEntryContainer(EntryContainer entryContainer)
+  {
+    this.entryContainer = entryContainer;
+  }
+
+  /**
+   * Get the entry entryContainer for the destination base DN.
+   * @return The entry entryContainer for the destination base DN.
+   */
+  public EntryContainer getEntryContainer()
+  {
+    return entryContainer;
+  }
+
+  /**
+   * Set the source entry entryContainer for the destination base DN.
+   * @param srcEntryContainer The entry source entryContainer for the
+   * destination base DN.
+   */
+  public void setSrcEntryContainer(EntryContainer srcEntryContainer)
+  {
+    this.srcEntryContainer = srcEntryContainer;
+  }
+
+  /**
+   * Get the source entry entryContainer for the destination base DN.
+   * @return The source entry entryContainer for the destination base DN.
+   */
+  public EntryContainer getSrcEntryContainer()
+  {
+    return srcEntryContainer;
+  }
+
+  /**
+   * Get the number of new LDAP entries imported into the entry database.
+   * @return The number of new LDAP entries imported into the entry database.
+   */
+  public long getEntryInsertCount()
+  {
+    return entryInsertCount.get();
+  }
+
+  /**
+   * Increment the number of new LDAP entries imported into the entry database
+   * by the given amount.
+   * @param delta The amount to add.
+   */
+  public void incrEntryInsertCount(long delta)
+  {
+    entryInsertCount.getAndAdd(delta);
+  }
+
+  /**
+   * Get the parent DN of the previous imported entry.
+   * @return The parent DN of the previous imported entry.
+   */
+  public DN getParentDN()
+  {
+    return parentDN;
+  }
+
+  /**
+   * Set the parent DN of the previous imported entry.
+   * @param parentDN The parent DN of the previous imported entry.
+   */
+  public void setParentDN(DN parentDN)
+  {
+    this.parentDN = parentDN;
+  }
+
+  /**
+   * Retrieves the set of base DNs that specify the set of entries to
+   * exclude from the import.  The contents of the returned list may
+   * be altered by the caller.
+   *
+   * @return  The set of base DNs that specify the set of entries to
+   *          exclude from the import.
+   */
+  public List<DN> getExcludeBranches() {
+    return excludeBranches;
+  }
+
+  /**
+   * Specifies the set of base DNs that specify the set of entries to
+   * exclude from the import.
+   *
+   * @param  excludeBranches  The set of base DNs that specify the set
+   *                          of entries to exclude from the import.
+   */
+  public void setExcludeBranches(List<DN> excludeBranches) {
+    if (excludeBranches == null) {
+      this.excludeBranches = new ArrayList<DN>(0);
+    } else {
+      this.excludeBranches = excludeBranches;
+    }
+  }
+
+  /**
+   * Retrieves the set of base DNs that specify the set of entries to
+   * include in the import.  The contents of the returned list may be
+   * altered by the caller.
+   *
+   * @return  The set of base DNs that specify the set of entries to
+   *          include in the import.
+   */
+  public List<DN> getIncludeBranches() {
+    return includeBranches;
+  }
+
+  /**
+   * Specifies the set of base DNs that specify the set of entries to
+   * include in the import.
+   *
+   * @param  includeBranches  The set of base DNs that specify the set
+   *                          of entries to include in the import.
+   */
+  public void setIncludeBranches(List<DN> includeBranches) {
+    if (includeBranches == null) {
+      this.includeBranches = new ArrayList<DN>(0);
+    } else {
+      this.includeBranches = includeBranches;
+    }
+  }
+
+  /**
+   * Check if the parent DN is in the pending map.
+   *
+   * @param parentDN The DN of the parent.
+   * @return <CODE>True</CODE> if the parent is in the pending map.
+   */
+  public boolean isPending(DN parentDN) {
+    boolean ret = false;
+    if (pendingMap.containsKey(parentDN)) {
+      ret = true;
+    }
+    return ret;
+  }
+
+  /**
+   * Add specified DN to the pending map.
+   *
+   * @param dn The DN to add to the map.
+   */
+  public void addPending(DN dn) {
+    pendingMap.putIfAbsent(dn, dn);
+  }
+
+  /**
+   * Remove the specified DN from the pending map.
+   *
+   * @param dn The DN to remove from the map.
+   */
+  public void removePending(DN dn) {
+    pendingMap.remove(dn);
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/Importer.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/Importer.java
new file mode 100644
index 0000000..1e9c8ab
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/Importer.java
@@ -0,0 +1,643 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+
+package org.opends.server.backends.ndb.importLDIF;
+
+import org.opends.server.types.*;
+import org.opends.server.loggers.debug.DebugTracer;
+import static org.opends.server.loggers.debug.DebugLogger.getTracer;
+import static org.opends.server.loggers.debug.DebugLogger.debugEnabled;
+import static org.opends.server.loggers.ErrorLogger.logError;
+import org.opends.server.util.LDIFReader;
+import org.opends.server.util.StaticUtils;
+import org.opends.server.util.LDIFException;
+import org.opends.server.util.RuntimeInformation;
+import static org.opends.server.util.DynamicConstants.BUILD_ID;
+import static org.opends.server.util.DynamicConstants.REVISION_NUMBER;
+import org.opends.server.config.ConfigException;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.backends.ndb.*;
+import org.opends.messages.Message;
+import org.opends.messages.NdbMessages;
+import static org.opends.messages.NdbMessages.*;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.*;
+import java.io.IOException;
+import org.opends.server.admin.std.server.NdbBackendCfg;
+
+/**
+ * Performs a LDIF import.
+ */
+
+public class Importer implements Thread.UncaughtExceptionHandler {
+
+
  /**
   * The tracer object for the debug logger.
   */
  private static final DebugTracer TRACER = getTracer();

  /**
   * The NDB backend configuration.
   */
  private NdbBackendCfg config;

  /**
   * The root container used for this import job.
   */
  private RootContainer rootContainer;

  /**
   * The LDIF import configuration.
   */
  private LDIFImportConfig ldifImportConfig;

  /**
   * The LDIF reader.
   */
  private LDIFReader reader;

  /**
   * Map of base DNs to their import context.
   */
  private LinkedHashMap<DN, DNContext> importMap =
      new LinkedHashMap<DN, DNContext>();

  /**
   * The number of entries migrated.
   */
  private int migratedCount;

  /**
   * The number of entries imported.
   */
  private int importedCount;

  /**
   * The number of milliseconds between job progress reports.
   */
  private long progressInterval = 10000;

  /**
   * The progress report timer.
   */
  private Timer timer;

  // The worker threads performing the import.
  private CopyOnWriteArrayList<WorkThread> threads;

  // The scheduled progress-report task.
  private ProgressTask pTask;

  // Set to true when a worker thread threw a runtime exception;
  // the import must then be stopped.
  private boolean unCaughtExceptionThrown = false;
+
+  /**
+   * Create a new import job with the specified ldif import config.
+   *
+   * @param ldifImportConfig The LDIF import config.
+   */
+  public Importer(LDIFImportConfig ldifImportConfig)
+  {
+    this.ldifImportConfig = ldifImportConfig;
+    this.threads = new CopyOnWriteArrayList<WorkThread>();
+  }
+
+  /**
+   * Start the worker threads.
+   */
+  private void startWorkerThreads() {
+
+    int importThreadCount = config.getImportThreadCount();
+
+    // Create one set of worker threads/buffer managers for each base DN.
+    for (DNContext context : importMap.values()) {
+      for (int i = 0; i < importThreadCount; i++) {
+        WorkThread t =
+          new WorkThread(context.getWorkQueue(), i, rootContainer);
+        t.setUncaughtExceptionHandler(this);
+        threads.add(t);
+        t.start();
+      }
+    }
+    // Start a timer for the progress report.
+    timer = new Timer();
+    TimerTask progressTask = new ProgressTask();
+    pTask = (ProgressTask) progressTask;
+    timer.scheduleAtFixedRate(progressTask, progressInterval,
+                              progressInterval);
+  }
+
+
  /**
   * Import an LDIF using the specified root container.
   *
   * Wires up the reader and configuration, builds one import context
   * per entry container, starts the worker threads, then drives the
   * read loop. The reader is always closed; worker cleanup is skipped
   * if a worker thread died with an uncaught exception.
   *
   * @param rootContainer  The root container.
   * @return A LDIF result.
   * @throws IOException If a IO error occurs.
   * @throws NDBException If a NDB error occurs.
   * @throws ConfigException If a configuration has an error.
   */
  public LDIFImportResult processImport(RootContainer rootContainer)
    throws IOException, ConfigException, NDBException {

    // Create an LDIF reader. Throws an exception if the file does not exist.
    reader = new LDIFReader(ldifImportConfig);
    this.rootContainer = rootContainer;
    this.config = rootContainer.getConfiguration();

    Message message;
    long startTime;
    try {
      int importThreadCount = config.getImportThreadCount();
      message = NOTE_NDB_IMPORT_STARTING.get(DirectoryServer.getVersionString(),
                                                     BUILD_ID, REVISION_NUMBER);
      logError(message);
      message = NOTE_NDB_IMPORT_THREAD_COUNT.get(importThreadCount);
      logError(message);
      RuntimeInformation.logInfo();
      // Build an import context for every entry container that is in
      // scope for this import.
      for (EntryContainer entryContainer : rootContainer.getEntryContainers()) {
        DNContext DNContext =  getImportContext(entryContainer);
        if(DNContext != null) {
          importMap.put(entryContainer.getBaseDN(), DNContext);
        }
      }
      // Make a note of the time we started.
      startTime = System.currentTimeMillis();
      startWorkerThreads();
      try {
        importedCount = 0;
        processLDIF();
      } finally {
        // Skip normal cleanup if a worker died; abort handling is
        // performed in the read loop instead.
        if(!unCaughtExceptionThrown) {
          cleanUp();
        }
      }
    }
    finally {
      reader.close();
    }
    // Only reached on success: any exception above propagates before
    // startTime could be read here.
    importProlog(startTime);
    return new LDIFImportResult(reader.getEntriesRead(),
                                reader.getEntriesRejected(),
                                reader.getEntriesIgnored());
  }
+
+
+  /**
+   * Create and log messages at the end of the successful import.
+   *
+   * @param startTime The time the import started.
+   */
+  private void importProlog(long startTime) {
+    Message message;
+    long finishTime = System.currentTimeMillis();
+    long importTime = (finishTime - startTime);
+
+    float rate = 0;
+    if (importTime > 0)
+    {
+      rate = 1000f*importedCount / importTime;
+    }
+
+    message = NOTE_NDB_IMPORT_FINAL_STATUS.
+        get(reader.getEntriesRead(), importedCount,
+            reader.getEntriesIgnored(), reader.getEntriesRejected(),
+            migratedCount, importTime/1000, rate);
+    logError(message);
+  }
+
+
+  /**
+   * Read entries from the LDIF reader and dispatch each one to the worker
+   * queue of its matching base DN, until end of file, cancellation of the
+   * import config, or an abort triggered by a worker-thread failure.
+   *
+   * @throws NDBException If all worker threads have died, or if a worker
+   *                      thread reported an uncaught exception (via
+   *                      abortImport, which always throws).
+   */
+  private void
+  processLDIF() throws NDBException {
+    Message message = NOTE_NDB_IMPORT_LDIF_START.get();
+    logError(message);
+    do {
+      if (ldifImportConfig.isCancelled()) {
+        break;
+      }
+      // All worker threads died: nothing can consume the queue, stop now.
+      if(threads.size() <= 0) {
+        message = ERR_NDB_IMPORT_NO_WORKER_THREADS.get();
+        throw new NDBException(message);
+      }
+      // A worker reported an uncaught exception (see uncaughtException);
+      // abortImport stops the remaining threads and always throws.
+      if(unCaughtExceptionThrown) {
+        abortImport();
+      }
+      try {
+        // Read the next entry.
+        Entry entry = reader.readEntry();
+        // Check for end of file.
+        if (entry == null) {
+          message = NOTE_NDB_IMPORT_LDIF_END.get();
+          logError(message);
+
+          break;
+        }
+        // Route it according to base DN.
+        DNContext DNContext = getImportConfig(entry.getDN());
+        processEntry(DNContext, entry);
+      }  catch (LDIFException e) {
+        // Malformed LDIF record: debug-log and continue with the next entry.
+        if (debugEnabled()) {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+      } catch (DirectoryException e) {
+        // No import context matched the entry's DN (see getImportConfig):
+        // debug-log and continue with the next entry.
+        if (debugEnabled()) {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+      } catch (Exception e)  {
+        // NOTE(review): any other failure is silently dropped here and the
+        // entry is skipped without being counted as rejected -- confirm this
+        // best-effort behavior is intended.
+        if (debugEnabled()) {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+      }
+    } while (true);
+  }
+
+  /**
+   * Process an entry using the specified import context: mark its DN as
+   * pending (worker threads handling its children wait until the DN leaves
+   * the pending map) and enqueue it on the context's work queue.
+   * NOTE(review): the boolean result of addEntryQueue (false when all worker
+   * threads have died) is discarded here; the main loop in processLDIF only
+   * detects dead workers on its next iteration.
+   *
+   * @param DNContext The import context.
+   * @param entry The entry to process.
+   */
+  private void processEntry(DNContext DNContext, Entry entry) {
+    //Add this DN to the pending map.
+    DNContext.addPending(entry.getDN());
+    addEntryQueue(DNContext, entry);
+  }
+
+  /**
+   * Add work item to specified import context's queue, blocking while the
+   * queue is full. The offer uses a one-second timeout so that the death of
+   * all worker threads is noticed instead of blocking forever.
+   *
+   * NOTE(review): if this thread is interrupted while waiting, the method
+   * debug-logs and falls through to {@code return true} even though the item
+   * may never have been enqueued, and the interrupt status is not restored
+   * with Thread.currentThread().interrupt() -- confirm intended.
+   *
+   * @param context The import context.
+   * @param item The work item to add.
+   * @return <CODE>True</CODE> if the work item was added to the queue,
+   *         <CODE>False</CODE> if all worker threads died while waiting.
+   */
+  private boolean
+  addQueue(DNContext context, WorkElement item) {
+    try {
+      while(!context.getWorkQueue().offer(item, 1000,
+                                            TimeUnit.MILLISECONDS)) {
+        if(threads.size() <= 0) {
+          // All worker threads died. We must stop now.
+          return false;
+        }
+      }
+    } catch (InterruptedException e) {
+      if (debugEnabled()) {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+    }
+    return true;
+  }
+
+
+  /**
+   * Wait until every import context's work queue is empty, polling each
+   * queue's size every 100 milliseconds (busy wait). Skipped entirely when
+   * no worker threads remain alive, since the queues would never drain.
+   */
+  private void drainWorkQueue() {
+    if(threads.size() > 0) {
+      for (DNContext context : importMap.values()) {
+        while (context.getWorkQueue().size() > 0) {
+          try {
+            Thread.sleep(100);
+          } catch (Exception e) {
+            // No action needed.
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Abort import: stop the worker threads, cancel the progress-report timer,
+   * and signal the failure to the caller. This method never returns
+   * normally.
+   *
+   * @throws org.opends.server.backends.ndb.NDBException always, to carry the
+   *         abort message to the caller.
+   */
+  private void abortImport() throws NDBException {
+     // Stop work threads telling them to skip substring flush.
+     stopWorkThreads(false);
+     timer.cancel();
+     Message message = ERR_NDB_IMPORT_LDIF_ABORT.get();
+     throw new NDBException(message);
+  }
+
+  /**
+   * Stop work threads: request each thread to stop, then join them and
+   * accumulate their per-thread imported counts into importedCount.
+   *
+   * NOTE(review): the call sites appear inverted relative to this parameter's
+   * documented meaning -- abortImport() passes {@code false} and the
+   * successful-path cleanUp() passes {@code true}. With {@code abort == false}
+   * and an uncaught worker exception, this method throws before joining the
+   * remaining threads (leaving importedCount partially accumulated). Confirm
+   * the intended semantics of the flag.
+   *
+   * @param abort <CODE>True</CODE> if stop work threads was called from an
+   *              abort.
+   * @throws NDBException if a worker thread reported an uncaught exception
+   *                      and {@code abort} is false.
+   */
+  private void
+  stopWorkThreads(boolean abort) throws NDBException {
+    for (WorkThread t : threads) {
+      t.stopProcessing();
+    }
+    // Wait for each thread to stop.
+    for (WorkThread t : threads) {
+      try {
+        if(!abort && unCaughtExceptionThrown) {
+          timer.cancel();
+          Message message = ERR_NDB_IMPORT_LDIF_ABORT.get();
+          throw new NDBException(message);
+        }
+        t.join();
+        importedCount += t.getImportedCount();
+      } catch (InterruptedException ie) {
+        // NOTE(review): a swallowed interrupt here skips both the join and
+        // this thread's contribution to importedCount -- confirm acceptable.
+      }
+    }
+  }
+
+  /**
+   * Clean up after a successful import: wait for the work queues to empty,
+   * suspend progress reporting, stop and join the worker threads, and cancel
+   * the progress timer.
+   *
+   * @throws NDBException If a NDB error occurs while stopping the worker
+   *                      threads.
+   */
+  private void cleanUp() throws NDBException {
+    // Drain the work queue.
+    drainWorkQueue();
+    // Silence the periodic progress report while shutting down.
+    pTask.setPause(true);
+    stopWorkThreads(true);
+    timer.cancel();
+  }
+
+  /**
+   * Uncaught exception handler: records that a worker died so the main loop
+   * aborts (see processLDIF), removes the dead thread from the worker list,
+   * and logs the failure.
+   *
+   * NOTE(review): the message is built from {@code e.getCause()} -- this
+   * assumes the worker wrapped the real failure (WorkThread.run rethrows as
+   * RuntimeException(e)); a throwable without a cause would log an empty or
+   * "null" trace. Confirm.
+   *
+   * @param t The thread working when the exception was thrown.
+   * @param e The exception.
+   */
+  public void uncaughtException(Thread t, Throwable e) {
+     unCaughtExceptionThrown = true;
+     threads.remove(t);
+     Message msg = ERR_NDB_IMPORT_THREAD_EXCEPTION.get(
+         t.getName(), StaticUtils.stackTraceToSingleLineString(e.getCause()));
+     logError(msg);
+   }
+
+  /**
+   * Return the import context responsible for the specified DN, found by
+   * walking up the DN's ancestors until one matches a registered base DN in
+   * importMap. The walk terminates when getParentDNInSuffix returns null
+   * (the DN was above every suffix).
+   *
+   * @param dn The entry DN to route.
+   * @return  The import context whose base DN is the closest ancestor of
+   *          (or equal to) the given DN.
+   * @throws DirectoryException with NO_SUCH_OBJECT if no registered base DN
+   *         is an ancestor of the given DN (the entry should not have been
+   *         routed to this backend).
+   */
+  private DNContext getImportConfig(DN dn) throws DirectoryException {
+    DNContext DNContext = null;
+    DN nodeDN = dn;
+
+    while (DNContext == null && nodeDN != null) {
+      DNContext = importMap.get(nodeDN);
+      if (DNContext == null)
+      {
+        nodeDN = nodeDN.getParentDNInSuffix();
+      }
+    }
+
+    if (nodeDN == null) {
+      // The entry should not have been given to this backend.
+      Message message =
+              NdbMessages.ERR_NDB_INCORRECT_ROUTING.get(String.valueOf(dn));
+      throw new DirectoryException(ResultCode.NO_SUCH_OBJECT, message);
+    }
+
+    return DNContext;
+  }
+
+  /**
+   * Creates an import context for the specified entry container, applying
+   * the include/exclude branch filtering from the LDIF import configuration.
+   *
+   * @param entryContainer The entry container.
+   * @return Import context to use during import, or {@code null} when this
+   *         base DN is explicitly excluded or no configured include branch
+   *         falls under it.
+   * @throws ConfigException If a configuration contains error.
+   */
+   private DNContext getImportContext(EntryContainer entryContainer)
+      throws ConfigException {
+    DN baseDN = entryContainer.getBaseDN();
+    // NOTE(review): srcEntryContainer is never assigned in this method, so
+    // the context's source container is always null -- presumably reserved
+    // for append/migrate modes; confirm against other backends' importers.
+    EntryContainer srcEntryContainer = null;
+    List<DN> includeBranches = new ArrayList<DN>();
+    List<DN> excludeBranches = new ArrayList<DN>();
+
+    // Branch filtering only applies when neither appending to existing data
+    // nor clearing the whole backend.
+    if(!ldifImportConfig.appendToExistingData() &&
+        !ldifImportConfig.clearBackend())
+    {
+      for(DN dn : ldifImportConfig.getExcludeBranches())
+      {
+        if(baseDN.equals(dn))
+        {
+          // This entire base DN was explicitly excluded. Skip.
+          return null;
+        }
+        if(baseDN.isAncestorOf(dn))
+        {
+          excludeBranches.add(dn);
+        }
+      }
+
+      if(!ldifImportConfig.getIncludeBranches().isEmpty())
+      {
+        for(DN dn : ldifImportConfig.getIncludeBranches())
+        {
+          if(baseDN.isAncestorOf(dn))
+          {
+            includeBranches.add(dn);
+          }
+        }
+
+        if(includeBranches.isEmpty())
+        {
+          // There are no branches in the explicitly defined include list under
+          // this base DN. Skip this base DN altogether.
+          return null;
+        }
+
+        // Remove any overlapping include branches (keep only the outermost
+        // ancestor of each overlapping group).
+        Iterator<DN> includeBranchIterator = includeBranches.iterator();
+        while(includeBranchIterator.hasNext())
+        {
+          DN includeDN = includeBranchIterator.next();
+          boolean keep = true;
+          for(DN dn : includeBranches)
+          {
+            if(!dn.equals(includeDN) && dn.isAncestorOf(includeDN))
+            {
+              keep = false;
+              break;
+            }
+          }
+          if(!keep)
+          {
+            includeBranchIterator.remove();
+          }
+        }
+
+        // Remove any exclude branches that are not under an include branch,
+        // since they will be migrated as part of the existing entries outside
+        // of the include branches anyway.
+        Iterator<DN> excludeBranchIterator = excludeBranches.iterator();
+        while(excludeBranchIterator.hasNext())
+        {
+          DN excludeDN = excludeBranchIterator.next();
+          boolean keep = false;
+          for(DN includeDN : includeBranches)
+          {
+            if(includeDN.isAncestorOf(excludeDN))
+            {
+              keep = true;
+              break;
+            }
+          }
+          if(!keep)
+          {
+            excludeBranchIterator.remove();
+          }
+        }
+      }
+    }
+
+    // Create an import context.
+    DNContext DNContext = new DNContext();
+    DNContext.setConfig(config);
+    DNContext.setLDIFImportConfig(this.ldifImportConfig);
+    DNContext.setLDIFReader(reader);
+
+    DNContext.setBaseDN(baseDN);
+    DNContext.setEntryContainer(entryContainer);
+    DNContext.setSrcEntryContainer(srcEntryContainer);
+
+    // Create the bounded work queue consumed by the worker threads; capacity
+    // comes from the backend configuration.
+    LinkedBlockingQueue<WorkElement> works =
+        new LinkedBlockingQueue<WorkElement>
+                     (config.getImportQueueSize());
+    DNContext.setWorkQueue(works);
+
+    // Set the include and exclude branches
+    DNContext.setIncludeBranches(includeBranches);
+    DNContext.setExcludeBranches(excludeBranches);
+
+    return DNContext;
+  }
+
+  /**
+   * Add specified context and entry to the work queue, wrapping them in a
+   * WorkElement first.
+   *
+   * @param context The context related to the entry DN.
+   * @param entry The entry to work on.
+   * @return  <CODE>True</CODE> if the element was added to the work queue.
+   */
+  private boolean
+  addEntryQueue(DNContext context,  Entry entry) {
+    WorkElement element =
+            WorkElement.decode(entry, context);
+    return addQueue(context, element);
+  }
+
+  /**
+   * This class reports progress of the import job at fixed intervals.
+   * Reports are emitted only while the LDIF file is still being read (see
+   * ldifRead()) and while output is not paused (see setPause()).
+   */
+  private final class ProgressTask extends TimerTask
+  {
+    /**
+     * The number of entries that had been read at the time of the
+     * previous progress report.
+     */
+    private long previousCount = 0;
+
+    /**
+     * The time in milliseconds of the previous progress report.
+     */
+    private long previousTime;
+
+    /**
+     * The number of bytes in a megabyte.
+     * Note that 1024*1024 bytes may eventually become known as a mebibyte(MiB).
+     * NOTE(review): unused in this class as written -- possibly left over
+     * from the JE backend's progress task; confirm before removing.
+     */
+    public static final int bytesPerMegabyte = 1024*1024;
+
+    // Set to true once the LDIF file has been fully read; suppresses
+    // further progress reports.
+    private boolean ldifRead = false;
+
+    // Suspend output.
+    private boolean pause = false;
+
+    /**
+     * Create a new import progress task.
+     */
+    public ProgressTask()
+    {
+      previousTime = System.currentTimeMillis();
+    }
+
+    /**
+     * Mark that the LDIF file has been fully read; subsequent runs of this
+     * task emit no progress report.
+     */
+    public void ldifRead() {
+      ldifRead = true;
+    }
+
+    /**
+     * Suspend output if true.
+     *
+     * @param v The value to set the suspend value to.
+     */
+    public void setPause(boolean v) {
+    pause=v;
+   }
+
+    /**
+     * The action to be performed by this timer task: log a progress report
+     * covering the interval since the previous run, then roll the counters.
+     */
+    public void run() {
+      // NOTE(review): the "+ 0" is a no-op, apparently left over from a
+      // template that added another counter here -- confirm and simplify.
+      long latestCount = reader.getEntriesRead() + 0;
+      long deltaCount = (latestCount - previousCount);
+      long latestTime = System.currentTimeMillis();
+      long deltaTime = latestTime - previousTime;
+      Message message;
+      // Avoid division by zero if two runs land in the same millisecond.
+      if (deltaTime == 0) {
+        return;
+      }
+      if(pause) {
+        return;
+      }
+      if(!ldifRead) {
+        long numRead     = reader.getEntriesRead();
+        long numIgnored  = reader.getEntriesIgnored();
+        long numRejected = reader.getEntriesRejected();
+        // Entries read per second over the last interval.
+        float rate = 1000f*deltaCount / deltaTime;
+        message = NOTE_NDB_IMPORT_PROGRESS_REPORT.get(
+            numRead, numIgnored, numRejected, 0, rate);
+        logError(message);
+      }
+      previousCount = latestCount;
+      previousTime = latestTime;
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/WorkElement.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/WorkElement.java
new file mode 100644
index 0000000..dd8927c
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/WorkElement.java
@@ -0,0 +1,104 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+
+package org.opends.server.backends.ndb.importLDIF;
+
+import org.opends.server.types.Entry;
+
+/**
+ * A work element passed on the work queue. It pairs an entry read from the
+ * LDIF file with the import context (base DN, entry container, queue) that
+ * should process it, plus an optional existing entry used in replace mode.
+ */
+public class WorkElement {
+
+  // The entry to import.
+  private Entry entry;
+
+  // Used in replace mode, this is the entry to replace.
+  private Entry existingEntry;
+
+  // The context related to the entry.
+  private DNContext context;
+
+  /**
+   * Create a work element instance. Private: instances are created through
+   * the static {@link #decode} factory.
+   *
+   * @param entry The entry to import.
+   * @param context The context related to the entry.
+   */
+  private WorkElement(Entry entry, DNContext context )  {
+    this.entry = entry;
+    this.context = context;
+  }
+
+  /**
+   * Static factory method to create a work element.
+   *
+   * @param entry The entry to import.
+   * @param context The context related to the entry.
+   * @return  A work element to put on the queue.
+   */
+  public static
+  WorkElement decode(Entry entry, DNContext context ) {
+    return new WorkElement(entry, context);
+  }
+
+  /**
+   * Return the entry to import.
+   *
+   * @return  The entry to import.
+   */
+  public Entry getEntry() {
+    return entry;
+  }
+
+  /**
+   * Return the context related to the entry.
+   *
+   * @return The context.
+   */
+  public DNContext getContext() {
+    return context;
+  }
+
+  /**
+   * Return an existing entry, used during replace mode.
+   *
+   * @return An existing entry, or {@code null} if none was set.
+   */
+  public Entry getExistingEntry() {
+    return existingEntry;
+  }
+
+  /**
+   * Set the existing entry.
+   *
+   * @param existingEntry The existing entry to set.
+   */
+  public void setExistingEntry(Entry existingEntry) {
+    this.existingEntry = existingEntry;
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/WorkThread.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/WorkThread.java
new file mode 100644
index 0000000..3935281
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/WorkThread.java
@@ -0,0 +1,240 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+
+package org.opends.server.backends.ndb.importLDIF;
+
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.*;
+import org.opends.server.api.DirectoryThread;
+import org.opends.server.backends.ndb.*;
+import org.opends.messages.Message;
+import static org.opends.messages.NdbMessages.*;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.TimeUnit;
+import org.opends.server.core.AddOperation;
+import org.opends.server.protocols.internal.InternalClientConnection;
+
+/**
+ * A thread to process import entries from a queue.  Multiple instances of
+ * this class process entries from a single shared queue. Each thread batches
+ * up to {@link #TXN_BATCH_SIZE} adds into one transaction before committing.
+ */
+public class WorkThread extends DirectoryThread {
+
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+  /**
+   * Number of operations to batch on a single transaction.
+   */
+  private static final int TXN_BATCH_SIZE = 15;
+
+  /**
+   * Work queue of work items, shared with the Importer which produces them.
+   */
+  private BlockingQueue<WorkElement> workQueue;
+
+  /**
+   * The number of entries imported by this thread.
+   */
+  private int importedCount = 0;
+
+  /**
+   * Root container.
+   */
+  private RootContainer rootContainer;
+
+  /**
+   * Abstract Transaction object. Reused across batches; committed every
+   * TXN_BATCH_SIZE entries and on parent-DN changes (see process()).
+   */
+  private AbstractTransaction txn;
+
+  /**
+   * A flag that is set when the thread has been told to stop processing.
+   * NOTE(review): written by another thread (stopProcessing) and read here
+   * without synchronization or volatile -- visibility relies on the poll
+   * timeout; consider making it volatile. Confirm.
+   */
+  private boolean stopRequested = false;
+
+  /**
+   * The thread number related to a thread. Only used to build the thread
+   * name passed to the superclass constructor.
+   */
+  private int threadNumber;
+
+
+
+  /**
+   * Create a work thread instance using the specified parameters.
+   *
+   * @param workQueue  The work queue to pull work off of.
+   * @param threadNumber The thread number.
+   * @param rootContainer The root container.
+   */
+  public WorkThread(BlockingQueue<WorkElement> workQueue, int threadNumber,
+                                RootContainer rootContainer)
+  {
+    super("Import Worker Thread " + threadNumber);
+    this.threadNumber = threadNumber;
+    this.workQueue = workQueue;
+    this.rootContainer = rootContainer;
+    this.txn = new AbstractTransaction(rootContainer);
+  }
+
+  /**
+   * Get the number of entries imported by this thread.
+   * @return The number of entries imported by this thread.
+   */
+   int getImportedCount() {
+    return importedCount;
+  }
+
+  /**
+   * Tells the thread to stop processing.
+   */
+   void stopProcessing() {
+    stopRequested = true;
+  }
+
+  /**
+   * Run the thread. Read from item from queue and process unless told to
+   * stop. Commits the shared transaction after every TXN_BATCH_SIZE
+   * processed elements, and flushes any partial batch on exit. Any
+   * exception escaping the loop is rethrown as a RuntimeException so the
+   * Importer's uncaughtException handler aborts the import.
+   */
+  @Override
+  public void run()
+  {
+    int batchSize = 0;
+    try {
+      do {
+        try {
+          // Poll with a timeout so a stop request is noticed within 1s.
+          WorkElement element = workQueue.poll(1000, TimeUnit.MILLISECONDS);
+          if (element != null) {
+            process(element);
+            batchSize++;
+            if (batchSize < TXN_BATCH_SIZE) {
+              continue;
+            } else {
+              // Batch is full: commit and start a new one.
+              batchSize = 0;
+              txn.commit();
+            }
+          }
+        }
+        catch (InterruptedException e) {
+          if (debugEnabled()) {
+            TRACER.debugCaught(DebugLogLevel.ERROR, e);
+          }
+        }
+      } while (!stopRequested);
+      // Flush any partially-filled batch before exiting.
+      txn.commit();
+    } catch (Exception e) {
+      if (debugEnabled()) {
+        TRACER.debugCaught(DebugLogLevel.ERROR, e);
+      }
+      // Propagate to the Importer's uncaughtException handler.
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Process a work element: wait for the entry's parent (if another thread
+   * is still importing it), then add the entry to the entry container under
+   * the shared batched transaction.
+   *
+   * @param element The work element to process.
+   *
+   * @throws Exception If an error occurs.
+   */
+  private void process(WorkElement element) throws Exception
+  {
+    Entry entry = element.getEntry();
+    DNContext context = element.getContext();
+    EntryContainer ec = context.getEntryContainer();
+
+    DN entryDN = entry.getDN();
+    DN parentDN = context.getEntryContainer().getParentWithinBase(entryDN);
+
+    if (parentDN != null) {
+      // If the parent is in the pending map, another thread is working on
+      // the parent entry; wait until that thread is done with the parent.
+      while (context.isPending(parentDN)) {
+        try {
+          Thread.sleep(50);
+        } catch (Exception e) {
+          // NOTE(review): returning here leaves entryDN in the pending map
+          // (removePending is never called), which could make threads
+          // processing its children wait forever -- confirm.
+          return;
+        }
+      }
+      if (context.getParentDN() == null) {
+        Message msg =
+                ERR_NDB_IMPORT_PARENT_NOT_FOUND.get(parentDN.toString());
+        rejectLastEntry(context, msg);
+        context.removePending(entryDN);
+        return;
+      }
+    } else {
+      // The entry has no parent within the base: it is the base entry
+      // itself, so it acts as its own parent marker below.
+      parentDN = entryDN;
+    }
+
+    // NOTE(review): InternalClientConnection.processAdd executes a full add
+    // operation through the server core before addEntryNoCommit is invoked
+    // with the same entry -- verify this is intended to only supply the
+    // operation context and does not double-process the add.
+    InternalClientConnection conn =
+      InternalClientConnection.getRootConnection();
+
+    AddOperation addOperation =
+      conn.processAdd(entry.getDN(), entry.getObjectClasses(),
+      entry.getUserAttributes(), entry.getOperationalAttributes());
+
+    try {
+      ec.addEntryNoCommit(entry, addOperation, txn);
+      // Commit early when the parent DN changed since the last entry, so a
+      // batch never spans sibling groups with different parents.
+      DN contextParentDN = context.getParentDN();
+      if ((contextParentDN == null) ||
+        !contextParentDN.equals(parentDN)) {
+        txn.commit();
+      }
+      importedCount++;
+    } catch (DirectoryException de) {
+      if (de.getResultCode() == ResultCode.ENTRY_ALREADY_EXISTS) {
+          // Duplicate entry: reject it and drop the transaction state.
+          Message msg = WARN_NDB_IMPORT_ENTRY_EXISTS.get();
+          rejectLastEntry(context, msg);
+          context.removePending(entryDN);
+          txn.close();
+      } else {
+        txn.close();
+        throw de;
+      }
+    }
+
+    context.setParentDN(parentDN);
+    context.removePending(entryDN);
+
+    return;
+  }
+
+  /**
+   * The synchronized wrapper method to reject the last entry. Being static
+   * synchronized, it serializes rejections across all worker threads on the
+   * WorkThread class lock, since the LDIF reader's reject bookkeeping is
+   * shared.
+   *
+   * @param context Import context.
+   * @param msg Reject message.
+   */
+  private static synchronized void rejectLastEntry(DNContext context,
+    Message msg)
+  {
+    context.getLDIFReader().rejectLastEntry(msg);
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/package-info.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/package-info.java
new file mode 100644
index 0000000..5610502
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/importLDIF/package-info.java
@@ -0,0 +1,36 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+
+
+
+/**
+ * Contains the code for the import LDIF feature of NDB backend.
+ */
+@org.opends.server.types.PublicAPI(
+     stability=org.opends.server.types.StabilityLevel.PRIVATE)
+package org.opends.server.backends.ndb.importLDIF;
+
diff --git a/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/package-info.java b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/package-info.java
new file mode 100644
index 0000000..0ff05a7
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/backends/ndb/package-info.java
@@ -0,0 +1,37 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+
+
+
+/**
+ * Contains the code for the Directory Server backend that uses the
+ * MySQL NDB Cluster as the repository for storing entry and index
+ * information.
+ */
+@org.opends.server.types.PublicAPI(
+     stability=org.opends.server.types.StabilityLevel.PRIVATE)
+package org.opends.server.backends.ndb;
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendAddOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendAddOperation.java
index 2f6cd3f..c81a413 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendAddOperation.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendAddOperation.java
@@ -22,7 +22,7 @@
  * CDDL HEADER END
  *
  *
- *      Copyright 2008 Sun Microsystems, Inc.
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
  */
 package org.opends.server.workflowelement.localbackend;
 
@@ -95,29 +95,45 @@
 
 
 
-  // The backend in which the entry is to be added.
-  private Backend backend;
+  /**
+   * The backend in which the entry is to be added.
+   */
+  protected Backend backend;
 
-  // Indicates whether the request includes the LDAP no-op control.
-  private boolean noOp;
+  /**
+   * Indicates whether the request includes the LDAP no-op control.
+   */
+  protected boolean noOp;
 
-  // The DN of the entry to be added.
-  private DN entryDN;
+  /**
+   * The DN of the entry to be added.
+   */
+  protected DN entryDN;
 
-  // The entry being added to the server.
-  private Entry entry;
+  /**
+   * The entry being added to the server.
+   */
+  protected Entry entry;
 
-  // The post-read request control included in the request, if applicable.
-  LDAPPostReadRequestControl postReadRequest;
+  /**
+   * The post-read request control included in the request, if applicable.
+   */
+  protected LDAPPostReadRequestControl postReadRequest;
 
-  // The set of object classes for the entry to add.
-  private Map<ObjectClass, String> objectClasses;
+  /**
+   * The set of object classes for the entry to add.
+   */
+  protected Map<ObjectClass, String> objectClasses;
 
-  // The set of operational attributes for the entry to add.
-  private Map<AttributeType,List<Attribute>> operationalAttributes;
+  /**
+   * The set of operational attributes for the entry to add.
+   */
+  protected Map<AttributeType,List<Attribute>> operationalAttributes;
 
-  // The set of user attributes for the entry to add.
-  private Map<AttributeType,List<Attribute>> userAttributes;
+  /**
+   * The set of user attributes for the entry to add.
+   */
+  protected Map<AttributeType,List<Attribute>> userAttributes;
 
 
 
@@ -159,7 +175,7 @@
    * @throws CanceledOperationException
    *           if this operation should be cancelled
    */
-  void processLocalAdd(final LocalBackendWorkflowElement wfe)
+  public void processLocalAdd(final LocalBackendWorkflowElement wfe)
       throws CanceledOperationException
   {
     boolean executePostOpPlugins = false;
@@ -862,7 +878,7 @@
    *                              attributes and the server is configured to
    *                              reject such entries.
    */
-  private void addRDNAttributesIfNecessary()
+  protected void addRDNAttributesIfNecessary()
           throws DirectoryException
   {
     RDN rdn = entryDN.getRDN();
@@ -1272,7 +1288,7 @@
    * @throws  DirectoryException  If the entry violates the server schema
    *                              configuration.
    */
-  private void checkSchema(Entry parentEntry)
+  protected void checkSchema(Entry parentEntry)
           throws DirectoryException
   {
     MessageBuilder invalidReason = new MessageBuilder();
@@ -1443,7 +1459,7 @@
    * @throws  DirectoryException  If there is a problem with any of the
    *                              request controls.
    */
-  private void processControls(DN parentDN)
+  protected void processControls(DN parentDN)
           throws DirectoryException
   {
     List<Control> requestControls = getRequestControls();
@@ -1581,7 +1597,7 @@
   /**
    * Adds the post-read response control to the response.
    */
-  private void addPostReadResponse()
+  protected void addPostReadResponse()
   {
     Entry addedEntry = entry.duplicate(true);
 
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendBindOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendBindOperation.java
index bce9971..93135f0 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendBindOperation.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendBindOperation.java
@@ -22,7 +22,7 @@
  * CDDL HEADER END
  *
  *
- *      Copyright 2008 Sun Microsystems, Inc.
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
  */
 package org.opends.server.workflowelement.localbackend;
 
@@ -96,15 +96,21 @@
 
 
 
-  // The backend in which the bind operation should be processed.
-  private Backend backend;
+  /**
+   * The backend in which the bind operation should be processed.
+   */
+  protected Backend backend;
 
-  // Indicates whether the bind response should include the first warning for an
-  // upcoming password expiration.
-  private boolean isFirstWarning;
+  /**
+   * Indicates whether the bind response should include the first warning
+   * for an upcoming password expiration.
+   */
+  protected boolean isFirstWarning;
 
-  // Indicates whether this bind is using a grace login for the user.
-  private boolean isGraceLogin;
+  /**
+   * Indicates whether this bind is using a grace login for the user.
+   */
+  protected boolean isGraceLogin;
 
   // Indicates whether the user must change his/her password before doing
   // anything else.
@@ -117,14 +123,18 @@
   // control in the bind response.
   private boolean returnAuthzID;
 
-  // Indicates whether to execute post-operation plugins.
-  private boolean executePostOpPlugins;
+  /**
+   * Indicates whether to execute post-operation plugins.
+   */
+  protected boolean executePostOpPlugins;
 
   // The client connection associated with this bind operation.
   private ClientConnection clientConnection;
 
-  // The bind DN provided by the client.
-  private DN bindDN;
+  /**
+   * The bind DN provided by the client.
+   */
+  protected DN bindDN;
 
   // The lookthrough limit that should be enforced for the user.
   private int lookthroughLimit;
@@ -141,11 +151,15 @@
   // The idle time limit that should be enforced for the user.
   private long idleTimeLimit;
 
-  // The password policy that applies to the user.
-  private PasswordPolicy policy;
+  /**
+   * The password policy that applies to the user.
+   */
+  protected PasswordPolicy policy;
 
-  // The password policy state for the user.
-  private PasswordPolicyState pwPolicyState;
+  /**
+   * The password policy state for the user.
+   */
+  protected PasswordPolicyState pwPolicyState;
 
   // The password policy error type for this bind operation.
   private PasswordPolicyErrorType pwPolicyErrorType;
@@ -153,8 +167,10 @@
   // The password policy warning type for this bind operation.
   private PasswordPolicyWarningType pwPolicyWarningType;
 
-  // The plugin config manager for the Directory Server.
-  private PluginConfigManager pluginConfigManager;
+  /**
+   * The plugin config manager for the Directory Server.
+   */
+  protected PluginConfigManager pluginConfigManager;
 
   // The SASL mechanism used for this bind operation.
   private String saslMechanism;
@@ -182,7 +198,7 @@
    *          The local backend work-flow element.
    *
    */
-  void processLocalBind(LocalBackendWorkflowElement wfe)
+  public void processLocalBind(LocalBackendWorkflowElement wfe)
   {
     this.backend = wfe.getBackend();
 
@@ -475,7 +491,7 @@
    * @throws  DirectoryException  If a problem occurs that should cause the bind
    *                              operation to fail.
    */
-  private boolean processSimpleBind()
+  protected boolean processSimpleBind()
           throws DirectoryException
   {
     // See if this is an anonymous bind.  If so, then determine whether
@@ -680,10 +696,12 @@
   /**
    * Performs the processing necessary for an anonymous simple bind.
    *
+   * @return  {@code true} if processing should continue for the operation, or
+   *          {@code false} if not.
    * @throws  DirectoryException  If a problem occurs that should cause the bind
    *                              operation to fail.
    */
-  private boolean processAnonymousSimpleBind()
+  protected boolean processAnonymousSimpleBind()
           throws DirectoryException
   {
     // If the server is in lockdown mode, then fail.
@@ -907,8 +925,8 @@
    * @throws  DirectoryException  If a problem occurs that should cause the bind
    *                              to fail.
    */
-  private void checkPasswordPolicyState(Entry userEntry,
-                                        SASLMechanismHandler<?> saslHandler)
+  protected void checkPasswordPolicyState(Entry userEntry,
+                                          SASLMechanismHandler<?> saslHandler)
           throws DirectoryException
   {
     boolean isSASLBind = (saslHandler != null);
@@ -1119,7 +1137,7 @@
    *
    * @param  userEntry  The entry for the authenticated user.
    */
-  private void setResourceLimits(Entry userEntry)
+  protected void setResourceLimits(Entry userEntry)
   {
     // See if the user's entry contains a custom size limit.
     AttributeType attrType =
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendCompareOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendCompareOperation.java
index 0c0aa6b..2302dd3 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendCompareOperation.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendCompareOperation.java
@@ -22,7 +22,7 @@
  * CDDL HEADER END
  *
  *
- *      Copyright 2008 Sun Microsystems, Inc.
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
  */
 package org.opends.server.workflowelement.localbackend;
 
@@ -73,17 +73,25 @@
 
 
 
-  // The backend in which the comparison is to be performed.
-  private Backend backend;
+  /**
+   * The backend in which the comparison is to be performed.
+   */
+  protected Backend backend;
 
-  // The client connection for this operation.
-  private ClientConnection clientConnection;
+  /**
+   * The client connection for this operation.
+   */
+  protected ClientConnection clientConnection;
 
-  // The DN of the entry to compare.
-  private DN entryDN;
+  /**
+   * The DN of the entry to compare.
+   */
+  protected DN entryDN;
 
-  // The entry to be compared.
-  private Entry entry = null;
+  /**
+   * The entry to be compared.
+   */
+  protected Entry entry = null;
 
 
 
@@ -121,7 +129,7 @@
    * @throws CanceledOperationException
    *           if this operation should be cancelled
    */
-  void processLocalCompare(LocalBackendWorkflowElement wfe)
+  public void processLocalCompare(LocalBackendWorkflowElement wfe)
       throws CanceledOperationException
   {
     boolean executePostOpPlugins = false;
@@ -406,7 +414,7 @@
    * @throws  DirectoryException  If a problem occurs that should prevent the
    *                              operation from succeeding.
    */
-  private void handleRequestControls()
+  protected void handleRequestControls()
           throws DirectoryException
   {
     List<Control> requestControls = getRequestControls();
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendDeleteOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendDeleteOperation.java
index 424a1cf..d4f6f49 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendDeleteOperation.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendDeleteOperation.java
@@ -89,20 +89,30 @@
 
 
 
-  // The backend in which the operation is to be processed.
-  private Backend backend;
+  /**
+   * The backend in which the operation is to be processed.
+   */
+  protected Backend backend;
 
-  // Indicates whether the LDAP no-op control has been requested.
-  private boolean noOp;
+  /**
+   * Indicates whether the LDAP no-op control has been requested.
+   */
+  protected boolean noOp;
 
-  // The client connection on which this operation was requested.
-  private ClientConnection clientConnection;
+  /**
+   * The client connection on which this operation was requested.
+   */
+  protected ClientConnection clientConnection;
 
-  // The DN of the entry to be deleted.
-  private DN entryDN;
+  /**
+   * The DN of the entry to be deleted.
+   */
+  protected DN entryDN;
 
-  // The entry to be deleted.
-  private Entry entry;
+  /**
+   * The entry to be deleted.
+   */
+  protected Entry entry;
 
   // The pre-read request control included in the request, if applicable.
   private LDAPPreReadRequestControl preReadRequest;
@@ -144,7 +154,7 @@
    * @throws CanceledOperationException
    *           if this operation should be cancelled
    */
-  void processLocalDelete(final LocalBackendWorkflowElement wfe)
+  public void processLocalDelete(final LocalBackendWorkflowElement wfe)
       throws CanceledOperationException
   {
     boolean executePostOpPlugins = false;
@@ -488,7 +498,7 @@
    * @throws  DirectoryException  If a problem occurs that should cause the
    *                              operation to fail.
    */
-  private void handleRequestControls()
+  protected void handleRequestControls()
           throws DirectoryException
   {
     List<Control> requestControls = getRequestControls();
@@ -620,7 +630,7 @@
   /**
    * Performs any processing needed for the LDAP pre-read control.
    */
-  private void processPreReadControl()
+  protected void processPreReadControl()
   {
     if (preReadRequest != null)
     {
@@ -672,7 +682,12 @@
     }
   }
 
-  private boolean handleConflictResolution() {
+  /**
+   * Handle conflict resolution.
+   * @return  {@code true} if processing should continue for the operation, or
+   *          {@code false} if not.
+   */
+  protected boolean handleConflictResolution() {
       boolean returnVal = true;
 
       for (SynchronizationProvider<?> provider :
@@ -703,7 +718,10 @@
       return returnVal;
   }
 
-  private void processSynchPostOperationPlugins() {
+  /**
+   * Invoke post operation synchronization providers.
+   */
+  protected void processSynchPostOperationPlugins() {
 
       for (SynchronizationProvider<?> provider :
           DirectoryServer.getSynchronizationProviders()) {
@@ -722,7 +740,12 @@
       }
   }
 
-  private boolean processPreOperation() {
+  /**
+   * Process pre operation.
+   * @return  {@code true} if processing should continue for the operation, or
+   *          {@code false} if not.
+   */
+  protected boolean processPreOperation() {
       boolean returnVal = true;
 
       for (SynchronizationProvider<?> provider :
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyDNOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyDNOperation.java
index 29bfde0..7aac31d 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyDNOperation.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyDNOperation.java
@@ -104,23 +104,35 @@
 
 
 
-  // The backend in which the operation is to be processed.
-  private Backend backend;
+  /**
+   * The backend in which the operation is to be processed.
+   */
+  protected Backend backend;
 
-  // Indicates whether the no-op control was included in the request.
-  private boolean noOp;
+  /**
+   * Indicates whether the no-op control was included in the request.
+   */
+  protected boolean noOp;
 
-  // The client connection on which this operation was requested.
-  private ClientConnection clientConnection;
+  /**
+   * The client connection on which this operation was requested.
+   */
+  protected ClientConnection clientConnection;
 
-  // The original DN of the entry.
-  DN entryDN;
+  /**
+   * The original DN of the entry.
+   */
+  protected DN entryDN;
 
-  // The current entry, before it is renamed.
-  private Entry currentEntry;
+  /**
+   * The current entry, before it is renamed.
+   */
+  protected Entry currentEntry;
 
-  // The new entry, as it will appear after it has been renamed.
-  private Entry newEntry;
+  /**
+   * The new entry, as it will appear after it has been renamed.
+   */
+  protected Entry newEntry;
 
   // The LDAP post-read request control, if present in the request.
   private LDAPPostReadRequestControl postReadRequest;
@@ -128,8 +140,10 @@
   // The LDAP pre-read request control, if present in the request.
   private LDAPPreReadRequestControl preReadRequest;
 
-  // The new RDN for the entry.
-  private RDN newRDN;
+  /**
+   * The new RDN for the entry.
+   */
+  protected RDN newRDN;
 
 
 
@@ -187,7 +201,7 @@
    * @throws CanceledOperationException
    *           if this operation should be cancelled
    */
-  void processLocalModifyDN(final LocalBackendWorkflowElement wfe)
+  public void processLocalModifyDN(final LocalBackendWorkflowElement wfe)
       throws CanceledOperationException
   {
     boolean executePostOpPlugins = false;
@@ -684,7 +698,7 @@
    * @throws  DirectoryException  If a problem occurs that should cause the
    *                              modify DN operation to fail.
    */
-  private void handleRequestControls()
+  protected void handleRequestControls()
           throws DirectoryException
   {
     List<Control> requestControls = getRequestControls();
@@ -836,7 +850,7 @@
    * @throws  DirectoryException  If a problem occurs that should cause the
    *                              modify DN operation to fail.
    */
-  private void applyRDNChanges(List<Modification> modifications)
+  protected void applyRDNChanges(List<Modification> modifications)
           throws DirectoryException
   {
     // If we should delete the old RDN values from the entry, then do so.
@@ -951,7 +965,7 @@
    * @throws  DirectoryException  If a problem occurs that should cause the
    *                              modify DN operation to fail.
    */
-  private void applyPreOpModifications(List<Modification> modifications,
+  protected void applyPreOpModifications(List<Modification> modifications,
                                        int startPos)
           throws DirectoryException
   {
@@ -1007,7 +1021,7 @@
    * Performs any necessary processing to create the pre-read and/or post-read
    * response controls and attach them to the response.
    */
-  private void processReadEntryControls()
+  protected void processReadEntryControls()
   {
     if (preReadRequest != null)
     {
@@ -1109,7 +1123,12 @@
     }
   }
 
-  private boolean handleConflictResolution() {
+  /**
+   * Handle conflict resolution.
+   * @return  {@code true} if processing should continue for the operation, or
+   *          {@code false} if not.
+   */
+  protected boolean handleConflictResolution() {
       boolean returnVal = true;
 
       for (SynchronizationProvider<?> provider :
@@ -1141,7 +1160,12 @@
       return returnVal;
   }
 
-  private boolean processPreOperation() {
+  /**
+   * Process pre operation.
+   * @return  {@code true} if processing should continue for the operation, or
+   *          {@code false} if not.
+   */
+  protected boolean processPreOperation() {
       boolean returnVal = true;
 
       for (SynchronizationProvider<?> provider :
@@ -1171,7 +1195,10 @@
       return returnVal;
   }
 
-  private void processSynchPostOperationPlugins() {
+  /**
+   * Invoke post operation synchronization providers.
+   */
+  protected void processSynchPostOperationPlugins() {
       for (SynchronizationProvider<?> provider : DirectoryServer
               .getSynchronizationProviders()) {
           try {
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyOperation.java
index dec33a3..6d011ca 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyOperation.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendModifyOperation.java
@@ -97,45 +97,67 @@
 
 
 
-  // The backend in which the target entry exists.
-  private Backend backend;
+  /**
+   * The backend in which the target entry exists.
+   */
+  protected Backend backend;
 
   // Indicates whether the request included the user's current password.
   private boolean currentPasswordProvided;
 
-  // Indicates whether the user's account has been enabled or disabled by this
-  // modify operation.
-  private boolean enabledStateChanged;
+  /**
+   * Indicates whether the user's account has been enabled or disabled
+   * by this modify operation.
+   */
+  protected boolean enabledStateChanged;
 
   // Indicates whether the user's account is currently enabled.
   private boolean isEnabled;
 
-  // Indicates whether the request included the LDAP no-op control.
-  private boolean noOp;
+  /**
+   * Indicates whether the request included the LDAP no-op control.
+   */
+  protected boolean noOp;
 
-  // Indicates whether this modify operation includees a password change.
-  private boolean passwordChanged;
+  /**
+   * Indicates whether this modify operation includes a password change.
+   */
+  protected boolean passwordChanged;
 
-  // Indicates whether the request included the password policy request control.
-  private boolean pwPolicyControlRequested;
+  /**
+   * Indicates whether the request included the password policy request control.
+   */
+  protected boolean pwPolicyControlRequested;
 
-  // Indicates whether the password change is a self-change.
-  private boolean selfChange;
+  /**
+   * Indicates whether the password change is a self-change.
+   */
+  protected boolean selfChange;
 
-  // Indicates whether the user's account was locked before this change.
-  private boolean wasLocked;
+  /**
+   * Indicates whether the user's account was locked before this change.
+   */
+  protected boolean wasLocked;
 
-  // The client connection associated with this operation.
-  private ClientConnection clientConnection;
+  /**
+   * The client connection associated with this operation.
+   */
+  protected ClientConnection clientConnection;
 
-  // The DN of the entry to modify.
-  private DN entryDN;
+  /**
+   * The DN of the entry to modify.
+   */
+  protected DN entryDN;
 
-  // The current entry, before any changes are applied.
-  private Entry currentEntry = null;
+  /**
+   * The current entry, before any changes are applied.
+   */
+  protected Entry currentEntry = null;
 
-  // The modified entry that will be stored in the backend.
-  private Entry modifiedEntry = null;
+  /**
+   * The modified entry that will be stored in the backend.
+   */
+  protected Entry modifiedEntry = null;
 
   // The number of passwords contained in the modify operation.
   private int numPasswords;
@@ -152,14 +174,20 @@
   // The set of clear-text new passwords (if any were provided).
   private List<AttributeValue> newPasswords = null;
 
-  // The set of modifications contained in this request.
-  private List<Modification> modifications;
+  /**
+   * The set of modifications contained in this request.
+   */
+  protected List<Modification> modifications;
 
-  // The password policy error type for this operation.
-  private PasswordPolicyErrorType pwpErrorType;
+  /**
+   * The password policy error type for this operation.
+   */
+  protected PasswordPolicyErrorType pwpErrorType;
 
-  // The password policy state for this modify operation.
-  private PasswordPolicyState pwPolicyState;
+  /**
+   * The password policy state for this modify operation.
+   */
+  protected PasswordPolicyState pwPolicyState;
 
 
 
@@ -273,7 +301,7 @@
    * @throws CanceledOperationException
    *           if this operation should be cancelled
    */
-  void processLocalModify(final LocalBackendWorkflowElement wfe)
+  public void processLocalModify(final LocalBackendWorkflowElement wfe)
       throws CanceledOperationException
   {
     boolean executePostOpPlugins = false;
@@ -719,7 +747,7 @@
    * @throws  DirectoryException  If a problem is encountered with any of the
    *                              controls.
    */
-  private void processRequestControls()
+  protected void processRequestControls()
           throws DirectoryException
   {
     List<Control> requestControls = getRequestControls();
@@ -869,7 +897,7 @@
    * @throws  DirectoryException  If a problem is encountered that should cause
    *                              the modify operation to fail.
    */
-  private void handleSchemaProcessing() throws DirectoryException
+  protected void handleSchemaProcessing() throws DirectoryException
   {
 
     for (Modification m : modifications)
@@ -982,7 +1010,7 @@
    * @throws  DirectoryException  If a problem is encountered that should cause
    *                              the modify operation to fail.
    */
-  private void handleInitialPasswordPolicyProcessing()
+  protected void handleInitialPasswordPolicyProcessing()
           throws DirectoryException
   {
     // Declare variables used for password policy state processing.
@@ -1921,7 +1949,7 @@
    * @throws  DirectoryException  If the modify operation should not be allowed
    *                              as a result of the writability check.
    */
-  private void checkWritability()
+  protected void checkWritability()
           throws DirectoryException
   {
     // If it is not a private backend, then check to see if the server or
@@ -1968,7 +1996,7 @@
    * Handles any account status notifications that may be needed as a result of
    * modify processing.
    */
-  private void handleAccountStatusNotifications()
+  protected void handleAccountStatusNotifications()
   {
     if (passwordChanged)
     {
@@ -2037,7 +2065,7 @@
    * Handles any processing that is required for the LDAP pre-read and/or
    * post-read controls.
    */
-  private void handleReadEntryProcessing()
+  protected void handleReadEntryProcessing()
   {
     if (preReadRequest != null)
     {
@@ -2138,7 +2166,12 @@
 
 
 
-  private boolean handleConflictResolution() {
+  /**
+   * Handle conflict resolution.
+   * @return  {@code true} if processing should continue for the operation, or
+   *          {@code false} if not.
+   */
+  protected boolean handleConflictResolution() {
       boolean returnVal = true;
 
       for (SynchronizationProvider<?> provider :
@@ -2169,7 +2202,12 @@
       return returnVal;
   }
 
-  private boolean processPreOperation() {
+  /**
+   * Process pre operation.
+   * @return  {@code true} if processing should continue for the operation, or
+   *          {@code false} if not.
+   */
+  protected boolean processPreOperation() {
       boolean returnVal = true;
       for (SynchronizationProvider<?> provider :
           DirectoryServer.getSynchronizationProviders()) {
@@ -2198,7 +2236,10 @@
       return returnVal;
   }
 
-  private void processSynchPostOperationPlugins() {
+  /**
+   * Invoke post operation synchronization providers.
+   */
+  protected void processSynchPostOperationPlugins() {
       for (SynchronizationProvider<?> provider :
           DirectoryServer.getSynchronizationProviders()) {
           try {
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendSearchOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendSearchOperation.java
index 6c85d95..33e96a1 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendSearchOperation.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendSearchOperation.java
@@ -22,7 +22,7 @@
  * CDDL HEADER END
  *
  *
- *      Copyright 2008 Sun Microsystems, Inc.
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
  */
 package org.opends.server.workflowelement.localbackend;
 
@@ -82,24 +82,36 @@
 
 
 
-  // The backend in which the search is to be performed.
-  private Backend backend;
+  /**
+   * The backend in which the search is to be performed.
+   */
+  protected Backend backend;
 
-  // Indicates whether we should actually process the search.  This should
-  // only be false if it's a persistent search with changesOnly=true.
-  private boolean processSearch;
+  /**
+   * Indicates whether we should actually process the search.  This should
+   * only be false if it's a persistent search with changesOnly=true.
+   */
+  protected boolean processSearch;
 
-  // The client connection for the search operation.
-  private ClientConnection clientConnection;
+  /**
+   * The client connection for the search operation.
+   */
+  protected ClientConnection clientConnection;
 
-  // The base DN for the search.
-  private DN baseDN;
+  /**
+   * The base DN for the search.
+   */
+  protected DN baseDN;
 
-  // The persistent search request, if applicable.
-  private PersistentSearch persistentSearch;
+  /**
+   * The persistent search request, if applicable.
+   */
+  protected PersistentSearch persistentSearch;
 
-  // The filter for the search.
-  private SearchFilter filter;
+  /**
+   * The filter for the search.
+   */
+  protected SearchFilter filter;
 
 
 
@@ -125,7 +137,7 @@
    * @throws CanceledOperationException
    *           if this operation should be cancelled
    */
-  void processLocalSearch(LocalBackendWorkflowElement wfe)
+  public void processLocalSearch(LocalBackendWorkflowElement wfe)
       throws CanceledOperationException
   {
     boolean executePostOpPlugins = false;
@@ -316,7 +328,7 @@
    * @throws  DirectoryException  If there is a problem with any of the request
    *                              controls.
    */
-  private void handleRequestControls()
+  protected void handleRequestControls()
           throws DirectoryException
   {
     List<Control> requestControls  = getRequestControls();
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendWorkflowElement.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendWorkflowElement.java
index 839ae6e..f3b7bbd 100644
--- a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendWorkflowElement.java
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/localbackend/LocalBackendWorkflowElement.java
@@ -22,7 +22,7 @@
  * CDDL HEADER END
  *
  *
- *      Copyright 2008 Sun Microsystems, Inc.
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
  */
 package org.opends.server.workflowelement.localbackend;
 
@@ -82,7 +82,7 @@
 
   // a lock to guarantee safe concurrent access to the registeredLocalBackends
   // variable
-  private static Object registeredLocalBackendsLock = new Object();
+  private static final Object registeredLocalBackendsLock = new Object();
 
 
   // A string indicating the type of the workflow element.
@@ -154,6 +154,7 @@
   /**
    * {@inheritDoc}
    */
+  @Override
   public void finalizeWorkflowElement()
   {
     // null all fields so that any use of the finalized object will raise
@@ -477,7 +478,7 @@
    * @return The backend associated with this local backend workflow
    *         element.
    */
-  Backend getBackend()
+  public Backend getBackend()
   {
     return backend;
   }
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBAddOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBAddOperation.java
new file mode 100644
index 0000000..4d1ef5c
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBAddOperation.java
@@ -0,0 +1,661 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.workflowelement.ndb;
+
+
+
+import com.mysql.cluster.ndbj.NdbOperation;
+import static org.opends.messages.CoreMessages.*;
+import static org.opends.server.config.ConfigConstants.*;
+import static org.opends.server.loggers.ErrorLogger.*;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.server.util.StaticUtils.*;
+
+import java.util.HashSet;
+
+import org.opends.messages.Message;
+import org.opends.server.api.ChangeNotificationListener;
+import org.opends.server.api.ClientConnection;
+import org.opends.server.api.SynchronizationProvider;
+import org.opends.server.api.plugin.PluginResult;
+import org.opends.server.backends.ndb.AbstractTransaction;
+import org.opends.server.backends.ndb.BackendImpl;
+import org.opends.server.core.AccessControlConfigManager;
+import org.opends.server.core.AddOperation;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.PluginConfigManager;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.AttributeType;
+import org.opends.server.types.CanceledOperationException;
+import org.opends.server.types.DN;
+import org.opends.server.types.DebugLogLevel;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.Entry;
+import org.opends.server.types.ObjectClass;
+import org.opends.server.types.Privilege;
+import org.opends.server.types.ResultCode;
+import org.opends.server.types.SynchronizationProviderResult;
+import org.opends.server.workflowelement.localbackend.LocalBackendAddOperation;
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendWorkflowElement;
+
+
+
+/**
+ * This class defines an operation used to add an entry in a local backend
+ * of the Directory Server.
+ */
+public class NDBAddOperation
+       extends LocalBackendAddOperation
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+
+  /**
+   * Creates a new operation that may be used to add a new entry in a
+   * local backend of the Directory Server.
+   *
+   * @param add The operation to enhance.
+   */
+  public NDBAddOperation(AddOperation add)
+  {
+    super(add);
+
+    NDBWorkflowElement.attachLocalOperation (add, this);
+  }
+
+
+
+  /**
+   * Process this add operation against a local backend.
+   *
+   * @param  wfe The local backend work-flow element.
+   *
+   * @throws CanceledOperationException if this operation should be
+   * cancelled
+   */
+  @Override
+  public void processLocalAdd(final LocalBackendWorkflowElement wfe)
+    throws CanceledOperationException {
+    boolean executePostOpPlugins = false;
+
+    this.backend = wfe.getBackend();
+    BackendImpl ndbBackend = (BackendImpl) backend;
+    ClientConnection clientConnection = getClientConnection();
+
+    // Get the plugin config manager that will be used for invoking plugins.
+    PluginConfigManager pluginConfigManager =
+         DirectoryServer.getPluginConfigManager();
+
+    // Check for a request to cancel this operation.
+    checkIfCanceled(false);
+
+    // Create a labeled block of code that we can break out of if a problem is
+    // detected.
+addProcessing:
+    {
+      // Process the entry DN and set of attributes to convert them from their
+      // raw forms as provided by the client to the forms required for the rest
+      // of the add processing.
+      entryDN = getEntryDN();
+      if (entryDN == null)
+      {
+        break addProcessing;
+      }
+
+      objectClasses = getObjectClasses();
+      userAttributes = getUserAttributes();
+      operationalAttributes = getOperationalAttributes();
+
+      if ((objectClasses == null ) || (userAttributes == null) ||
+          (operationalAttributes == null))
+      {
+        break addProcessing;
+      }
+
+      // Check for a request to cancel this operation.
+      checkIfCanceled(false);
+
+      DN parentDN = entryDN.getParentDNInSuffix();
+
+      AbstractTransaction txn =
+        new AbstractTransaction(ndbBackend.getRootContainer());
+
+      try
+      {
+        // Check for a request to cancel this operation.
+        checkIfCanceled(false);
+
+        // Invoke any conflict resolution processing that might be needed by the
+        // synchronization provider.
+        for (SynchronizationProvider provider :
+             DirectoryServer.getSynchronizationProviders())
+        {
+          try
+          {
+            SynchronizationProviderResult result =
+                provider.handleConflictResolution(this);
+            if (! result.continueProcessing())
+            {
+              setResultCode(result.getResultCode());
+              appendErrorMessage(result.getErrorMessage());
+              setMatchedDN(result.getMatchedDN());
+              setReferralURLs(result.getReferralURLs());
+              break addProcessing;
+            }
+          }
+          catch (DirectoryException de)
+          {
+            if (debugEnabled())
+            {
+              TRACER.debugCaught(DebugLogLevel.ERROR, de);
+            }
+
+            logError(ERR_ADD_SYNCH_CONFLICT_RESOLUTION_FAILED.get(
+                          getConnectionID(), getOperationID(),
+                          getExceptionMessage(de)));
+
+            setResponseData(de);
+            break addProcessing;
+          }
+        }
+
+        for (AttributeType at : userAttributes.keySet())
+        {
+          // If the attribute type is marked "NO-USER-MODIFICATION" then fail
+          // unless this is an internal operation or is related to
+          // synchronization in some way.
+          // This must be done before running the password policy code
+          // and any other code that may add attributes marked as
+          // "NO-USER-MODIFICATION"
+          //
+          // Note that performing these checks at this point
+          // in the processing does not make it possible for pre-parse plugins
+          // to add NO-USER-MODIFICATION attributes to the entry.
+          if (at.isNoUserModification())
+          {
+            if (! (isInternalOperation() || isSynchronizationOperation()))
+            {
+              setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+              appendErrorMessage(ERR_ADD_ATTR_IS_NO_USER_MOD.get(
+                                      String.valueOf(entryDN),
+                                      at.getNameOrOID()));
+
+              break addProcessing;
+            }
+          }
+        }
+
+        for (AttributeType at : operationalAttributes.keySet())
+        {
+          if (at.isNoUserModification())
+          {
+            if (! (isInternalOperation() || isSynchronizationOperation()))
+            {
+              setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+              appendErrorMessage(ERR_ADD_ATTR_IS_NO_USER_MOD.get(
+                                      String.valueOf(entryDN),
+                                      at.getNameOrOID()));
+
+              break addProcessing;
+            }
+          }
+        }
+
+        // Get the parent entry, if it exists.
+        Entry parentEntry = null;
+        if (parentDN != null)
+        {
+          try
+          {
+            parentEntry = ndbBackend.getEntryNoCommit(parentDN, txn,
+              NdbOperation.LockMode.LM_Read);
+            if (parentEntry == null)
+            {
+              DN matchedDN = parentDN.getParentDNInSuffix();
+              while (matchedDN != null)
+              {
+                try
+                {
+                  if (DirectoryServer.entryExists(matchedDN))
+                  {
+                    setMatchedDN(matchedDN);
+                    break;
+                  }
+                }
+                catch (Exception e)
+                {
+                  if (debugEnabled())
+                  {
+                    TRACER.debugCaught(DebugLogLevel.ERROR, e);
+                  }
+                  break;
+                }
+
+                matchedDN = matchedDN.getParentDNInSuffix();
+              }
+
+
+              // The parent doesn't exist, so this add can't be successful.
+              setResultCode(ResultCode.NO_SUCH_OBJECT);
+              appendErrorMessage(ERR_ADD_NO_PARENT.get(String.valueOf(entryDN),
+                                      String.valueOf(parentDN)));
+              break addProcessing;
+            }
+          }
+          catch (DirectoryException de)
+          {
+            if (debugEnabled())
+            {
+              TRACER.debugCaught(DebugLogLevel.ERROR, de);
+            }
+
+            setResponseData(de);
+            break addProcessing;
+          }
+        }
+
+
+        // Check to make sure that all of the RDN attributes are included as
+        // attribute values.  If not, then either add them or report an error.
+        try
+        {
+          addRDNAttributesIfNecessary();
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break addProcessing;
+        }
+
+
+        // Check to make sure that all objectclasses have their superior classes
+        // listed in the entry.  If not, then add them.
+        HashSet<ObjectClass> additionalClasses = null;
+        for (ObjectClass oc : objectClasses.keySet())
+        {
+          ObjectClass superiorClass = oc.getSuperiorClass();
+          if ((superiorClass != null) &&
+              (! objectClasses.containsKey(superiorClass)))
+          {
+            if (additionalClasses == null)
+            {
+              additionalClasses = new HashSet<ObjectClass>();
+            }
+
+            additionalClasses.add(superiorClass);
+          }
+        }
+
+        if (additionalClasses != null)
+        {
+          for (ObjectClass oc : additionalClasses)
+          {
+            addObjectClassChain(oc);
+          }
+        }
+
+
+        // Create an entry object to encapsulate the set of attributes and
+        // objectclasses.
+        entry = new Entry(entryDN, objectClasses, userAttributes,
+                          operationalAttributes);
+
+        // Check to see if the entry includes a privilege specification.  If so,
+        // then the requester must have the PRIVILEGE_CHANGE privilege.
+        AttributeType privType =
+             DirectoryServer.getAttributeType(OP_ATTR_PRIVILEGE_NAME, true);
+        if (entry.hasAttribute(privType) &&
+            (! clientConnection.hasPrivilege(Privilege.PRIVILEGE_CHANGE, this)))
+        {
+
+          appendErrorMessage(
+               ERR_ADD_CHANGE_PRIVILEGE_INSUFFICIENT_PRIVILEGES.get());
+          setResultCode(ResultCode.INSUFFICIENT_ACCESS_RIGHTS);
+          break addProcessing;
+        }
+
+
+        // If it's not a synchronization operation, then check
+        // to see if the entry contains one or more passwords and if they
+        // are valid in accordance with the password policies associated with
+        // the user.  Also perform any encoding that might be required by
+        // password storage schemes.
+        if (! isSynchronizationOperation())
+        {
+          try
+          {
+            handlePasswordPolicy();
+          }
+          catch (DirectoryException de)
+          {
+            if (debugEnabled())
+            {
+              TRACER.debugCaught(DebugLogLevel.ERROR, de);
+            }
+
+            setResponseData(de);
+            break addProcessing;
+          }
+        }
+
+
+        // If the server is configured to check schema and the
+        // operation is not a synchronization operation,
+        // check to see if the entry is valid according to the server schema,
+        // and also whether its attributes are valid according to their syntax.
+        if ((DirectoryServer.checkSchema()) && (! isSynchronizationOperation()))
+        {
+          try
+          {
+            checkSchema(parentEntry);
+          }
+          catch (DirectoryException de)
+          {
+            if (debugEnabled())
+            {
+              TRACER.debugCaught(DebugLogLevel.ERROR, de);
+            }
+
+            setResponseData(de);
+            break addProcessing;
+          }
+        }
+
+
+        // Get the backend in which the add is to be performed.
+        if (backend == null)
+        {
+          setResultCode(ResultCode.NO_SUCH_OBJECT);
+          appendErrorMessage(Message.raw("No backend for entry " +
+                                         entryDN.toString())); // TODO: i18n
+          break addProcessing;
+        }
+
+
+        // Check to see if there are any controls in the request. If so, then
+        // see if there is any special processing required.
+        try
+        {
+          processControls(parentDN);
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break addProcessing;
+        }
+
+
+        // Check to see if the client has permission to perform the add.
+
+        // FIXME: for now assume that this will check all permission
+        // pertinent to the operation. This includes proxy authorization
+        // and any other controls specified.
+
+        // FIXME: earlier checks to see if the entry already exists or
+        // if the parent entry does not exist may have already exposed
+        // sensitive information to the client.
+        if (AccessControlConfigManager.getInstance().getAccessControlHandler().
+                 isAllowed(this) == false)
+        {
+          setResultCode(ResultCode.INSUFFICIENT_ACCESS_RIGHTS);
+          appendErrorMessage(ERR_ADD_AUTHZ_INSUFFICIENT_ACCESS_RIGHTS.get(
+                                  String.valueOf(entryDN)));
+          break addProcessing;
+        }
+
+        // Check for a request to cancel this operation.
+        checkIfCanceled(false);
+
+        // If the operation is not a synchronization operation,
+        // Invoke the pre-operation add plugins.
+        if (! isSynchronizationOperation())
+        {
+          executePostOpPlugins = true;
+          PluginResult.PreOperation preOpResult =
+            pluginConfigManager.invokePreOperationAddPlugins(this);
+          if (!preOpResult.continueProcessing())
+          {
+            setResultCode(preOpResult.getResultCode());
+            appendErrorMessage(preOpResult.getErrorMessage());
+            setMatchedDN(preOpResult.getMatchedDN());
+            setReferralURLs(preOpResult.getReferralURLs());
+            break addProcessing;
+          }
+        }
+
+
+        // If it is not a private backend, then check to see if the server or
+        // backend is operating in read-only mode.
+        if (! backend.isPrivateBackend())
+        {
+          switch (DirectoryServer.getWritabilityMode())
+          {
+            case DISABLED:
+              setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+              appendErrorMessage(ERR_ADD_SERVER_READONLY.get(
+                                      String.valueOf(entryDN)));
+              break addProcessing;
+
+            case INTERNAL_ONLY:
+              if (! (isInternalOperation() || isSynchronizationOperation()))
+              {
+                setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+                appendErrorMessage(ERR_ADD_SERVER_READONLY.get(
+                                        String.valueOf(entryDN)));
+                break addProcessing;
+              }
+              break;
+          }
+
+          switch (backend.getWritabilityMode())
+          {
+            case DISABLED:
+              setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+              appendErrorMessage(ERR_ADD_BACKEND_READONLY.get(
+                                      String.valueOf(entryDN)));
+              break addProcessing;
+
+            case INTERNAL_ONLY:
+              if (! (isInternalOperation() || isSynchronizationOperation()))
+              {
+                setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+                appendErrorMessage(ERR_ADD_BACKEND_READONLY.get(
+                                        String.valueOf(entryDN)));
+                break addProcessing;
+              }
+              break;
+          }
+        }
+
+
+        try
+        {
+          if (noOp)
+          {
+            appendErrorMessage(INFO_ADD_NOOP.get());
+            setResultCode(ResultCode.NO_OPERATION);
+          }
+          else
+          {
+            for (SynchronizationProvider provider :
+                 DirectoryServer.getSynchronizationProviders())
+            {
+              try
+              {
+                SynchronizationProviderResult result =
+                    provider.doPreOperation(this);
+                if (! result.continueProcessing())
+                {
+                  setResultCode(result.getResultCode());
+                  appendErrorMessage(result.getErrorMessage());
+                  setMatchedDN(result.getMatchedDN());
+                  setReferralURLs(result.getReferralURLs());
+                  break addProcessing;
+                }
+              }
+              catch (DirectoryException de)
+              {
+                if (debugEnabled())
+                {
+                  TRACER.debugCaught(DebugLogLevel.ERROR, de);
+                }
+
+                logError(ERR_ADD_SYNCH_PREOP_FAILED.get(getConnectionID(),
+                              getOperationID(), getExceptionMessage(de)));
+                setResponseData(de);
+                break addProcessing;
+              }
+            }
+
+            ndbBackend.addEntry(entry, this, txn);
+          }
+
+          if (postReadRequest != null)
+          {
+            addPostReadResponse();
+          }
+
+
+          if (! noOp)
+          {
+            setResultCode(ResultCode.SUCCESS);
+          }
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break addProcessing;
+        }
+      }
+      finally
+      {
+        for (SynchronizationProvider provider :
+          DirectoryServer.getSynchronizationProviders())
+        {
+          try
+          {
+            provider.doPostOperation(this);
+          }
+          catch (DirectoryException de)
+          {
+            if (debugEnabled())
+            {
+              TRACER.debugCaught(DebugLogLevel.ERROR, de);
+            }
+
+            logError(ERR_ADD_SYNCH_POSTOP_FAILED.get(getConnectionID(),
+                getOperationID(), getExceptionMessage(de)));
+            setResponseData(de);
+            break;
+          }
+        }
+        try {
+          txn.close();
+        } catch (Exception ex) {
+          if (debugEnabled()) {
+            TRACER.debugCaught(DebugLogLevel.ERROR, ex);
+          }
+        }
+      }
+    }
+
+    // Invoke the post-operation or post-synchronization add plugins.
+    if (isSynchronizationOperation())
+    {
+      if (getResultCode() == ResultCode.SUCCESS)
+      {
+        pluginConfigManager.invokePostSynchronizationAddPlugins(this);
+      }
+    }
+    else if (executePostOpPlugins)
+    {
+      // FIXME -- Should this also be done while holding the locks?
+      PluginResult.PostOperation postOpResult =
+          pluginConfigManager.invokePostOperationAddPlugins(this);
+      if(!postOpResult.continueProcessing())
+      {
+        setResultCode(postOpResult.getResultCode());
+        appendErrorMessage(postOpResult.getErrorMessage());
+        setMatchedDN(postOpResult.getMatchedDN());
+        setReferralURLs(postOpResult.getReferralURLs());
+        return;
+      }
+    }
+
+
+    // Register a post-response call-back which will notify persistent
+    // searches and change listeners.
+    if (getResultCode() == ResultCode.SUCCESS)
+    {
+      registerPostResponseCallback(new Runnable()
+      {
+        public void run()
+        {
+          // Notify change listeners.
+          for (ChangeNotificationListener changeListener : DirectoryServer
+              .getChangeNotificationListeners())
+          {
+            try
+            {
+              changeListener.handleAddOperation(NDBAddOperation.this, entry);
+            }
+            catch (Exception e)
+            {
+              if (debugEnabled())
+              {
+                TRACER.debugCaught(DebugLogLevel.ERROR, e);
+              }
+
+              logError(ERR_ADD_ERROR_NOTIFYING_CHANGE_LISTENER
+                  .get(getExceptionMessage(e)));
+            }
+          }
+        }
+      });
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBBindOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBBindOperation.java
new file mode 100644
index 0000000..4cf774d
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBBindOperation.java
@@ -0,0 +1,251 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.workflowelement.ndb;
+
+
+
+import java.util.List;
+
+import org.opends.messages.Message;
+import org.opends.server.api.plugin.PluginResult;
+import org.opends.server.core.BindOperation;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.PasswordPolicyState;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.AccountStatusNotification;
+import org.opends.server.types.AccountStatusNotificationType;
+import org.opends.server.types.Attribute;
+import org.opends.server.types.AttributeType;
+import org.opends.server.types.AuthenticationInfo;
+import org.opends.server.types.ByteString;
+import org.opends.server.types.DebugLogLevel;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.DN;
+import org.opends.server.types.Entry;
+import org.opends.server.types.ResultCode;
+
+import org.opends.server.workflowelement.localbackend.LocalBackendBindOperation;
+import static org.opends.messages.CoreMessages.*;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.server.util.StaticUtils.*;
+
+
+
+/**
+ * This class defines an operation used to bind against the Directory Server,
+ * with the bound user entry within a local backend.
+ */
+public class NDBBindOperation
+       extends LocalBackendBindOperation
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+
+  /**
+   * Creates a new operation that may be used to bind where
+   * the bound user entry is stored in a local backend of the Directory Server.
+   *
+   * @param bind The operation to enhance.
+   */
+  public NDBBindOperation(BindOperation bind)
+  {
+    super(bind);
+    NDBWorkflowElement.attachLocalOperation (bind, this);
+  }
+
+
+
+  /**
+   * Performs the processing necessary for a simple bind operation.
+   *
+   * @return  {@code true} if processing should continue for the operation, or
+   *          {@code false} if not.
+   *
+   * @throws  DirectoryException  If a problem occurs that should cause the bind
+   *                              operation to fail.
+   */
+  @Override
+  protected boolean processSimpleBind()
+          throws DirectoryException
+  {
+    // See if this is an anonymous bind.  If so, then determine whether
+    // to allow it.
+    ByteString simplePassword = getSimplePassword();
+    if ((simplePassword == null) || (simplePassword.length() == 0))
+    {
+      return processAnonymousSimpleBind();
+    }
+
+    // See if the bind DN is actually one of the alternate root DNs
+    // defined in the server.  If so, then replace it with the actual DN
+    // for that user.
+    DN actualRootDN = DirectoryServer.getActualRootBindDN(bindDN);
+    if (actualRootDN != null)
+    {
+      bindDN = actualRootDN;
+    }
+
+    // Get the user entry based on the bind DN.  If it does not exist,
+    // then fail.
+    Entry userEntry;
+    try {
+      userEntry = backend.getEntry(bindDN);
+    } catch (DirectoryException de) {
+      if (debugEnabled()) {
+        TRACER.debugCaught(DebugLogLevel.ERROR, de);
+      }
+
+      userEntry = null;
+      throw new DirectoryException(ResultCode.INVALID_CREDENTIALS,
+        de.getMessageObject());
+    }
+
+    if (userEntry == null) {
+      throw new DirectoryException(ResultCode.INVALID_CREDENTIALS,
+        ERR_BIND_OPERATION_UNKNOWN_USER.get(
+        String.valueOf(bindDN)));
+    } else {
+      setUserEntryDN(userEntry.getDN());
+    }
+
+
+    // Check to see if the user has a password.  If not, then fail.
+    // FIXME -- We need to have a way to enable/disable debugging.
+    pwPolicyState = new PasswordPolicyState(userEntry, false);
+    policy = pwPolicyState.getPolicy();
+    AttributeType pwType = policy.getPasswordAttribute();
+
+    List<Attribute> pwAttr = userEntry.getAttribute(pwType);
+    if ((pwAttr == null) || (pwAttr.isEmpty())) {
+      throw new DirectoryException(ResultCode.INVALID_CREDENTIALS,
+        ERR_BIND_OPERATION_NO_PASSWORD.get(
+        String.valueOf(bindDN)));
+    }
+
+
+    // Perform a number of password policy state checks for the user.
+    checkPasswordPolicyState(userEntry, null);
+
+
+    // Invoke the pre-operation bind plugins.
+    executePostOpPlugins = true;
+    PluginResult.PreOperation preOpResult =
+      pluginConfigManager.invokePreOperationBindPlugins(this);
+    if (!preOpResult.continueProcessing()) {
+      setResultCode(preOpResult.getResultCode());
+      appendErrorMessage(preOpResult.getErrorMessage());
+      setMatchedDN(preOpResult.getMatchedDN());
+      setReferralURLs(preOpResult.getReferralURLs());
+      return false;
+    }
+
+
+    // Determine whether the provided password matches any of the stored
+    // passwords for the user.
+    if (pwPolicyState.passwordMatches(simplePassword)) {
+      setResultCode(ResultCode.SUCCESS);
+
+      boolean isRoot = DirectoryServer.isRootDN(userEntry.getDN());
+      if (DirectoryServer.lockdownMode() && (!isRoot)) {
+        throw new DirectoryException(ResultCode.INVALID_CREDENTIALS,
+          ERR_BIND_REJECTED_LOCKDOWN_MODE.get());
+      }
+      setAuthenticationInfo(new AuthenticationInfo(userEntry,
+        simplePassword,
+        isRoot));
+
+
+      // Set resource limits for the authenticated user.
+      setResourceLimits(userEntry);
+
+
+      // Perform any remaining processing for a successful simple
+      // authentication.
+      pwPolicyState.handleDeprecatedStorageSchemes(simplePassword);
+      pwPolicyState.clearFailureLockout();
+
+      if (isFirstWarning) {
+        pwPolicyState.setWarnedTime();
+
+        int numSeconds = pwPolicyState.getSecondsUntilExpiration();
+        Message m = WARN_BIND_PASSWORD_EXPIRING.get(
+          secondsToTimeString(numSeconds));
+
+        pwPolicyState.generateAccountStatusNotification(
+          AccountStatusNotificationType.PASSWORD_EXPIRING, userEntry, m,
+          AccountStatusNotification.createProperties(pwPolicyState,
+          false, numSeconds, null, null));
+      }
+
+      if (isGraceLogin) {
+        pwPolicyState.updateGraceLoginTimes();
+      }
+
+      pwPolicyState.setLastLoginTime();
+    } else {
+      setResultCode(ResultCode.INVALID_CREDENTIALS);
+      setAuthFailureReason(ERR_BIND_OPERATION_WRONG_PASSWORD.get());
+
+      if (policy.getLockoutFailureCount() > 0) {
+        pwPolicyState.updateAuthFailureTimes();
+        if (pwPolicyState.lockedDueToFailures()) {
+          AccountStatusNotificationType notificationType;
+          Message m;
+
+          boolean tempLocked;
+          int lockoutDuration = pwPolicyState.getSecondsUntilUnlock();
+          if (lockoutDuration > -1) {
+            notificationType =
+              AccountStatusNotificationType.ACCOUNT_TEMPORARILY_LOCKED;
+            tempLocked = true;
+
+            m = ERR_BIND_ACCOUNT_TEMPORARILY_LOCKED.get(
+              secondsToTimeString(lockoutDuration));
+          } else {
+            notificationType =
+              AccountStatusNotificationType.ACCOUNT_PERMANENTLY_LOCKED;
+            tempLocked = false;
+
+            m = ERR_BIND_ACCOUNT_PERMANENTLY_LOCKED.get();
+          }
+
+          pwPolicyState.generateAccountStatusNotification(
+            notificationType, userEntry, m,
+            AccountStatusNotification.createProperties(pwPolicyState,
+            tempLocked, -1, null, null));
+        }
+      }
+    }
+
+    return true;
+  }
+}
+
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBCompareOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBCompareOperation.java
new file mode 100644
index 0000000..b1c6212
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBCompareOperation.java
@@ -0,0 +1,309 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.workflowelement.ndb;
+
+
+
+import java.util.HashSet;
+import java.util.List;
+
+import org.opends.server.api.ClientConnection;
+import org.opends.server.api.plugin.PluginResult;
+import org.opends.server.core.AccessControlConfigManager;
+import org.opends.server.core.CompareOperation;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.PluginConfigManager;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.*;
+import org.opends.server.types.operation.PostOperationCompareOperation;
+import org.opends.server.types.operation.PostResponseCompareOperation;
+import org.opends.server.types.operation.PreOperationCompareOperation;
+
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendCompareOperation;
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendWorkflowElement;
+import static org.opends.messages.CoreMessages.*;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.server.util.StaticUtils.*;
+
+
+
+/**
+ * This class defines an operation that may be used to determine whether a
+ * specified entry in the Directory Server contains a given attribute-value
+ * pair.
+ */
+public class NDBCompareOperation
+       extends LocalBackendCompareOperation
+       implements PreOperationCompareOperation, PostOperationCompareOperation,
+                  PostResponseCompareOperation
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+
+  /**
+   * Creates a new compare operation based on the provided compare operation.
+   *
+   * @param compare  the compare operation
+   */
+  public NDBCompareOperation(CompareOperation compare)
+  {
+    super(compare);
+    NDBWorkflowElement.attachLocalOperation(compare, this);
+  }
+
+
+
+  /**
+   * Process this compare operation in a NDB backend.
+   *
+   * @param  wfe The local backend work-flow element.
+   *
+   * @throws CanceledOperationException if this operation should be
+   * cancelled
+   */
+  @Override
+  public void processLocalCompare(LocalBackendWorkflowElement wfe)
+    throws CanceledOperationException {
+    boolean executePostOpPlugins = false;
+
+    this.backend = wfe.getBackend();
+
+    // Cache a reference to the client connection for the checks below.
+    // (The previous revision also declared a same-named local variable
+    // holding the identical value; the field alone is sufficient.)
+    clientConnection = getClientConnection();
+
+    // Get the plugin config manager that will be used for invoking plugins.
+    PluginConfigManager pluginConfigManager =
+         DirectoryServer.getPluginConfigManager();
+
+
+    // Check for a request to cancel this operation.
+    checkIfCanceled(false);
+
+
+    // Create a labeled block of code that we can break out of if a problem is
+    // detected.
+compareProcessing:
+    {
+      // Process the entry DN to convert it from the raw form to the form
+      // required for the rest of the compare processing.
+      entryDN = getEntryDN();
+      if (entryDN == null)
+      {
+        break compareProcessing;
+      }
+
+
+      // If the target entry is in the server configuration, then make sure the
+      // requester has the CONFIG_READ privilege.
+      if (DirectoryServer.getConfigHandler().handlesEntry(entryDN) &&
+          (! clientConnection.hasPrivilege(Privilege.CONFIG_READ, this)))
+      {
+        appendErrorMessage(ERR_COMPARE_CONFIG_INSUFFICIENT_PRIVILEGES.get());
+        setResultCode(ResultCode.INSUFFICIENT_ACCESS_RIGHTS);
+        break compareProcessing;
+      }
+
+
+      // Check for a request to cancel this operation.
+      checkIfCanceled(false);
+
+      // Get the entry.  If it does not exist, then fail, reporting the
+      // closest existing ancestor (if any) as the matched DN.
+      try {
+        entry = DirectoryServer.getEntry(entryDN);
+
+        if (entry == null) {
+          setResultCode(ResultCode.NO_SUCH_OBJECT);
+          appendErrorMessage(
+            ERR_COMPARE_NO_SUCH_ENTRY.get(String.valueOf(entryDN)));
+
+          // See if one of the entry's ancestors exists.
+          DN parentDN = entryDN.getParentDNInSuffix();
+          while (parentDN != null) {
+            try {
+              if (DirectoryServer.entryExists(parentDN)) {
+                setMatchedDN(parentDN);
+                break;
+              }
+            } catch (Exception e) {
+              if (debugEnabled()) {
+                TRACER.debugCaught(DebugLogLevel.ERROR, e);
+              }
+              break;
+            }
+
+            parentDN = parentDN.getParentDNInSuffix();
+          }
+
+          break compareProcessing;
+        }
+      } catch (DirectoryException de) {
+        if (debugEnabled()) {
+          TRACER.debugCaught(DebugLogLevel.ERROR, de);
+        }
+
+        setResultCode(de.getResultCode());
+        appendErrorMessage(de.getMessageObject());
+        break compareProcessing;
+      }
+
+      // Check to see if there are any controls in the request.  If so, then
+      // see if there is any special processing required.
+      try {
+        handleRequestControls();
+      } catch (DirectoryException de) {
+        if (debugEnabled()) {
+          TRACER.debugCaught(DebugLogLevel.ERROR, de);
+        }
+
+        setResponseData(de);
+        break compareProcessing;
+      }
+
+
+      // Check to see if the client has permission to perform the
+      // compare.
+
+      // FIXME: for now assume that this will check all permission
+      // pertinent to the operation. This includes proxy authorization
+      // and any other controls specified.
+
+      // FIXME: earlier checks to see if the entry already exists may
+      // have already exposed sensitive information to the client.
+      if (!AccessControlConfigManager.getInstance().
+        getAccessControlHandler().isAllowed(this)) {
+        setResultCode(ResultCode.INSUFFICIENT_ACCESS_RIGHTS);
+        appendErrorMessage(ERR_COMPARE_AUTHZ_INSUFFICIENT_ACCESS_RIGHTS.get(
+          String.valueOf(entryDN)));
+        break compareProcessing;
+      }
+
+      // Check for a request to cancel this operation.
+      checkIfCanceled(false);
+
+
+      // Invoke the pre-operation compare plugins.
+      executePostOpPlugins = true;
+      PluginResult.PreOperation preOpResult =
+        pluginConfigManager.invokePreOperationComparePlugins(this);
+      if (!preOpResult.continueProcessing()) {
+        setResultCode(preOpResult.getResultCode());
+        appendErrorMessage(preOpResult.getErrorMessage());
+        setMatchedDN(preOpResult.getMatchedDN());
+        setReferralURLs(preOpResult.getReferralURLs());
+        break compareProcessing;
+      }
+
+
+      // Get the base attribute type and set of options by splitting the raw
+      // attribute description on ';' (e.g. "cn;lang-en;x-opt").
+      String baseName;
+      HashSet<String> options;
+      String rawAttributeType = getRawAttributeType();
+      int semicolonPos = rawAttributeType.indexOf(';');
+      if (semicolonPos > 0) {
+        baseName = toLowerCase(rawAttributeType.substring(0, semicolonPos));
+
+        options = new HashSet<String>();
+        int nextPos = rawAttributeType.indexOf(';', semicolonPos + 1);
+        while (nextPos > 0) {
+          options.add(rawAttributeType.substring(semicolonPos + 1, nextPos));
+          semicolonPos = nextPos;
+          nextPos = rawAttributeType.indexOf(';', semicolonPos + 1);
+        }
+
+        options.add(rawAttributeType.substring(semicolonPos + 1));
+      } else {
+        baseName = toLowerCase(rawAttributeType);
+        options = null;
+      }
+
+
+      // Actually perform the compare operation.
+      AttributeType attrType = getAttributeType();
+      if (attrType == null) {
+        attrType = DirectoryServer.getAttributeType(baseName, true);
+        setAttributeType(attrType);
+      }
+
+      List<Attribute> attrList = entry.getAttribute(attrType, options);
+      if ((attrList == null) || attrList.isEmpty()) {
+        setResultCode(ResultCode.NO_SUCH_ATTRIBUTE);
+        if (options == null) {
+          appendErrorMessage(WARN_COMPARE_OP_NO_SUCH_ATTR.get(
+            String.valueOf(entryDN), baseName));
+        } else {
+          appendErrorMessage(WARN_COMPARE_OP_NO_SUCH_ATTR_WITH_OPTIONS.get(
+            String.valueOf(entryDN), baseName));
+        }
+      } else {
+        AttributeValue value = AttributeValues.create(attrType,
+          getAssertionValue());
+
+        boolean matchFound = false;
+        for (Attribute a : attrList) {
+          if (a.contains(value)) {
+            matchFound = true;
+            break;
+          }
+        }
+
+        if (matchFound) {
+          setResultCode(ResultCode.COMPARE_TRUE);
+        } else {
+          setResultCode(ResultCode.COMPARE_FALSE);
+        }
+      }
+    }
+
+
+    // Check for a request to cancel this operation.
+    checkIfCanceled(false);
+
+
+    // Invoke the post-operation compare plugins.
+    if (executePostOpPlugins)
+    {
+      PluginResult.PostOperation postOpResult =
+           pluginConfigManager.invokePostOperationComparePlugins(this);
+      if (!postOpResult.continueProcessing())
+      {
+        setResultCode(postOpResult.getResultCode());
+        appendErrorMessage(postOpResult.getErrorMessage());
+        setMatchedDN(postOpResult.getMatchedDN());
+        setReferralURLs(postOpResult.getReferralURLs());
+      }
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBDeleteOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBDeleteOperation.java
new file mode 100644
index 0000000..f6e9b87
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBDeleteOperation.java
@@ -0,0 +1,428 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.workflowelement.ndb;
+
+
+
+import com.mysql.cluster.ndbj.NdbOperation;
+
+import org.opends.messages.Message;
+import org.opends.server.api.Backend;
+import org.opends.server.api.ChangeNotificationListener;
+import org.opends.server.api.plugin.PluginResult;
+import org.opends.server.backends.ndb.AbstractTransaction;
+import org.opends.server.backends.ndb.BackendImpl;
+import org.opends.server.core.AccessControlConfigManager;
+import org.opends.server.core.DeleteOperation;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.PluginConfigManager;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.CanceledOperationException;
+import org.opends.server.types.DebugLogLevel;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.DN;
+import org.opends.server.types.ResultCode;
+import org.opends.server.types.operation.PostOperationDeleteOperation;
+import org.opends.server.types.operation.PostResponseDeleteOperation;
+import org.opends.server.types.operation.PreOperationDeleteOperation;
+import org.opends.server.types.operation.PostSynchronizationDeleteOperation;
+
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendDeleteOperation;
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendWorkflowElement;
+import static org.opends.messages.CoreMessages.*;
+import static org.opends.server.loggers.ErrorLogger.*;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.server.util.StaticUtils.*;
+
+
+
+/**
+ * This class defines an operation used to delete an entry in an NDB backend
+ * of the Directory Server.  The delete is performed against the MySQL
+ * Cluster (NDB) storage inside a single NDB transaction that covers both
+ * the initial entry read and the removal itself.
+ */
+public class NDBDeleteOperation
+       extends LocalBackendDeleteOperation
+       implements PreOperationDeleteOperation, PostOperationDeleteOperation,
+                  PostResponseDeleteOperation,
+                  PostSynchronizationDeleteOperation
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+
+  /**
+   * Creates a new operation that may be used to delete an entry from an
+   * NDB backend of the Directory Server.
+   *
+   * @param delete The operation to enhance.
+   */
+  public NDBDeleteOperation(DeleteOperation delete)
+  {
+    super(delete);
+    NDBWorkflowElement.attachLocalOperation (delete, this);
+  }
+
+
+
+  /**
+   * Process this delete operation in an NDB backend.
+   *
+   * @param  wfe The local backend work-flow element.
+   *
+   * @throws CanceledOperationException if this operation should be
+   * cancelled
+   */
+  @Override
+  public void processLocalDelete(final LocalBackendWorkflowElement wfe)
+    throws CanceledOperationException {
+    // Tracks whether pre-operation plugins were invoked, so the matching
+    // post-operation plugins are run on the way out.
+    boolean executePostOpPlugins = false;
+
+    this.backend = wfe.getBackend();
+    // This workflow element only fronts NDB backends, so the cast is
+    // expected to always succeed -- NOTE(review): confirm.
+    BackendImpl ndbBackend = (BackendImpl) backend;
+
+    clientConnection = getClientConnection();
+
+    // Get the plugin config manager that will be used for invoking plugins.
+    PluginConfigManager pluginConfigManager =
+         DirectoryServer.getPluginConfigManager();
+
+    // Check for a request to cancel this operation.
+    checkIfCanceled(false);
+
+    // Create a labeled block of code that we can break out of if a problem is
+    // detected.
+deleteProcessing:
+    {
+      // Process the entry DN to convert it from its raw form as provided by the
+      // client to the form required for the rest of the delete processing.
+      entryDN = getEntryDN();
+      if (entryDN == null){
+        break deleteProcessing;
+      }
+
+      // The NDB transaction spanning the entry read and the delete; it is
+      // always released in the finally block below.
+      AbstractTransaction txn =
+        new AbstractTransaction(ndbBackend.getRootContainer());
+
+      try
+      {
+        // Get the entry to delete.  If it doesn't exist, then fail.
+        try
+        {
+          // Read without committing, taking an exclusive lock -- presumably
+          // held until the delete completes in this same transaction
+          // (TODO confirm against getEntryNoCommit semantics).
+          entry = ndbBackend.getEntryNoCommit(entryDN, txn,
+            NdbOperation.LockMode.LM_Exclusive);
+
+          if (entry == null)
+          {
+            setResultCode(ResultCode.NO_SUCH_OBJECT);
+            appendErrorMessage(ERR_DELETE_NO_SUCH_ENTRY.get(
+                                    String.valueOf(entryDN)));
+
+            // Walk up the tree so the closest existing ancestor can be
+            // reported as the matched DN for the NO_SUCH_OBJECT result.
+            try
+            {
+              DN parentDN = entryDN.getParentDNInSuffix();
+              while (parentDN != null)
+              {
+                if (DirectoryServer.entryExists(parentDN))
+                {
+                  setMatchedDN(parentDN);
+                  break;
+                }
+
+                parentDN = parentDN.getParentDNInSuffix();
+              }
+            }
+            catch (Exception e)
+            {
+              // Best effort only: a failure to compute the matched DN must
+              // not change the NO_SUCH_OBJECT outcome.
+              if (debugEnabled())
+              {
+                TRACER.debugCaught(DebugLogLevel.ERROR, e);
+              }
+            }
+
+            break deleteProcessing;
+          }
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break deleteProcessing;
+        }
+
+        if(!handleConflictResolution()) {
+            break deleteProcessing;
+        }
+
+        // Check to see if the client has permission to perform the
+        // delete.
+
+        // Check to see if there are any controls in the request.  If so, then
+        // see if there is any special processing required.
+        try
+        {
+          handleRequestControls();
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break deleteProcessing;
+        }
+
+
+        // FIXME: for now assume that this will check all permission
+        // pertinent to the operation. This includes proxy authorization
+        // and any other controls specified.
+
+        // FIXME: earlier checks to see if the entry already exists may
+        // have already exposed sensitive information to the client.
+        if (! AccessControlConfigManager.getInstance().
+                   getAccessControlHandler().isAllowed(this))
+        {
+          setResultCode(ResultCode.INSUFFICIENT_ACCESS_RIGHTS);
+          appendErrorMessage(ERR_DELETE_AUTHZ_INSUFFICIENT_ACCESS_RIGHTS.get(
+                                  String.valueOf(entryDN)));
+          break deleteProcessing;
+        }
+
+        // Check for a request to cancel this operation.
+        checkIfCanceled(false);
+
+
+        // If the operation is not a synchronization operation,
+        // invoke the pre-delete plugins.
+        if (! isSynchronizationOperation())
+        {
+          executePostOpPlugins = true;
+          PluginResult.PreOperation preOpResult =
+               pluginConfigManager.invokePreOperationDeletePlugins(this);
+          if (!preOpResult.continueProcessing())
+          {
+            // A plugin vetoed the operation; surface its result verbatim.
+            setResultCode(preOpResult.getResultCode());
+            appendErrorMessage(preOpResult.getErrorMessage());
+            setMatchedDN(preOpResult.getMatchedDN());
+            setReferralURLs(preOpResult.getReferralURLs());
+            break deleteProcessing;
+          }
+        }
+
+
+        // Get the backend to use for the delete.  If there is none, then fail.
+        if (backend == null)
+        {
+          setResultCode(ResultCode.NO_SUCH_OBJECT);
+          appendErrorMessage(ERR_DELETE_NO_SUCH_ENTRY.get(
+                                  String.valueOf(entryDN)));
+          break deleteProcessing;
+        }
+
+
+        // If it is not a private backend, then check to see if the server or
+        // backend is operating in read-only mode.
+        if (! backend.isPrivateBackend())
+        {
+          switch (DirectoryServer.getWritabilityMode())
+          {
+            case DISABLED:
+              setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+              appendErrorMessage(ERR_DELETE_SERVER_READONLY.get(
+                                      String.valueOf(entryDN)));
+              break deleteProcessing;
+
+            case INTERNAL_ONLY:
+              if (! (isInternalOperation() || isSynchronizationOperation()))
+              {
+                setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+                appendErrorMessage(ERR_DELETE_SERVER_READONLY.get(
+                                        String.valueOf(entryDN)));
+                break deleteProcessing;
+              }
+          }
+
+          switch (backend.getWritabilityMode())
+          {
+            case DISABLED:
+              setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+              appendErrorMessage(ERR_DELETE_BACKEND_READONLY.get(
+                                      String.valueOf(entryDN)));
+              break deleteProcessing;
+
+            case INTERNAL_ONLY:
+              if (! (isInternalOperation() || isSynchronizationOperation()))
+              {
+                setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+                appendErrorMessage(ERR_DELETE_BACKEND_READONLY.get(
+                                        String.valueOf(entryDN)));
+                break deleteProcessing;
+              }
+          }
+        }
+
+
+        // The selected backend will have the responsibility of making sure that
+        // the entry actually exists and does not have any children (or possibly
+        // handling a subtree delete).  But we will need to check if there are
+        // any subordinate backends that should stop us from attempting the
+        // delete.
+        Backend[] subBackends = backend.getSubordinateBackends();
+        for (Backend b : subBackends)
+        {
+          DN[] baseDNs = b.getBaseDNs();
+          for (DN dn : baseDNs)
+          {
+            if (dn.isDescendantOf(entryDN))
+            {
+              setResultCode(ResultCode.NOT_ALLOWED_ON_NONLEAF);
+              appendErrorMessage(ERR_DELETE_HAS_SUB_BACKEND.get(
+                                      String.valueOf(entryDN),
+                                      String.valueOf(dn)));
+              break deleteProcessing;
+            }
+          }
+        }
+
+
+        // Actually perform the delete.
+        try
+        {
+          if (noOp)
+          {
+            // No-Op control present: report the would-be outcome without
+            // touching the backend.
+            setResultCode(ResultCode.NO_OPERATION);
+            appendErrorMessage(INFO_DELETE_NOOP.get());
+          }
+          else
+          {
+              if(!processPreOperation()) {
+                  break deleteProcessing;
+              }
+              // Remove the entry within the shared NDB transaction.
+              ndbBackend.deleteEntry(entryDN, entry, this, txn);
+          }
+
+
+          processPreReadControl();
+
+
+          if (! noOp)
+          {
+            setResultCode(ResultCode.SUCCESS);
+          }
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break deleteProcessing;
+        }
+      }
+      finally
+      {
+        processSynchPostOperationPlugins();
+        // Release the NDB transaction.  Close failures are logged only so
+        // they cannot mask the real operation result.
+        try {
+          txn.close();
+        } catch (Exception ex) {
+          if (debugEnabled()) {
+            TRACER.debugCaught(DebugLogLevel.ERROR, ex);
+          }
+        }
+      }
+    }
+
+    // Invoke the post-operation or post-synchronization delete plugins.
+    if (isSynchronizationOperation())
+    {
+      if (getResultCode() == ResultCode.SUCCESS)
+      {
+        pluginConfigManager.invokePostSynchronizationDeletePlugins(this);
+      }
+    }
+    else if (executePostOpPlugins)
+    {
+      PluginResult.PostOperation postOpResult =
+          pluginConfigManager.invokePostOperationDeletePlugins(this);
+      if (!postOpResult.continueProcessing())
+      {
+        setResultCode(postOpResult.getResultCode());
+        appendErrorMessage(postOpResult.getErrorMessage());
+        setMatchedDN(postOpResult.getMatchedDN());
+        setReferralURLs(postOpResult.getReferralURLs());
+        // A vetoing post-op plugin also suppresses the change-notification
+        // callback registered below.
+        return;
+      }
+    }
+
+
+    // Register a post-response call-back which will notify persistent
+    // searches and change listeners.
+    if (getResultCode() == ResultCode.SUCCESS)
+    {
+      registerPostResponseCallback(new Runnable()
+      {
+
+        public void run()
+        {
+          // Notify change listeners.
+          for (ChangeNotificationListener changeListener : DirectoryServer
+              .getChangeNotificationListeners())
+          {
+            try
+            {
+              changeListener.handleDeleteOperation(
+                  NDBDeleteOperation.this, entry);
+            }
+            catch (Exception e)
+            {
+              // A misbehaving listener must not break response processing;
+              // log the failure and keep notifying the remaining listeners.
+              if (debugEnabled())
+              {
+                TRACER.debugCaught(DebugLogLevel.ERROR, e);
+              }
+
+              Message message = ERR_DELETE_ERROR_NOTIFYING_CHANGE_LISTENER
+                  .get(getExceptionMessage(e));
+              logError(message);
+            }
+          }
+        }
+      });
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBModifyDNOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBModifyDNOperation.java
new file mode 100644
index 0000000..649a85a
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBModifyDNOperation.java
@@ -0,0 +1,532 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.workflowelement.ndb;
+
+
+
+import com.mysql.cluster.ndbj.NdbOperation;
+import java.util.List;
+
+import org.opends.messages.Message;
+import org.opends.server.api.Backend;
+import org.opends.server.api.ChangeNotificationListener;
+import org.opends.server.api.plugin.PluginResult;
+import org.opends.server.backends.ndb.AbstractTransaction;
+import org.opends.server.backends.ndb.BackendImpl;
+import org.opends.server.core.AccessControlConfigManager;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.ModifyDNOperation;
+import org.opends.server.core.PluginConfigManager;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.CanceledOperationException;
+import org.opends.server.types.DebugLogLevel;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.DN;
+import org.opends.server.types.Modification;
+import org.opends.server.types.ResultCode;
+import org.opends.server.types.operation.PostOperationModifyDNOperation;
+import org.opends.server.types.operation.PostResponseModifyDNOperation;
+import org.opends.server.types.operation.PreOperationModifyDNOperation;
+import org.opends.server.types.operation.PostSynchronizationModifyDNOperation;
+
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendModifyDNOperation;
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendWorkflowElement;
+import static org.opends.messages.CoreMessages.*;
+import static org.opends.server.loggers.ErrorLogger.*;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.server.util.StaticUtils.*;
+
+
+
+/**
+ * This class defines an operation used to move an entry in an NDB backend
+ * of the Directory Server.  The rename is performed against the MySQL
+ * Cluster (NDB) storage inside a single NDB transaction that covers both
+ * the initial entry read and the rename itself.
+ */
+public class NDBModifyDNOperation
+  extends LocalBackendModifyDNOperation
+  implements PreOperationModifyDNOperation,
+             PostOperationModifyDNOperation,
+             PostResponseModifyDNOperation,
+             PostSynchronizationModifyDNOperation
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+
+  /**
+   * Creates a new operation that may be used to move an entry in an
+   * NDB backend of the Directory Server.
+   *
+   * @param operation The operation to enhance.
+   */
+  public NDBModifyDNOperation (ModifyDNOperation operation)
+  {
+    super(operation);
+    NDBWorkflowElement.attachLocalOperation (operation, this);
+  }
+
+
+
+  /**
+   * Process this modify DN operation in an NDB backend.
+   *
+   * @param  wfe The local backend work-flow element.
+   *
+   * @throws CanceledOperationException if this operation should be
+   * cancelled
+   */
+  @Override
+  public void processLocalModifyDN(final LocalBackendWorkflowElement wfe)
+    throws CanceledOperationException {
+    // Tracks whether pre-operation plugins were invoked, so the matching
+    // post-operation plugins are run on the way out.
+    boolean executePostOpPlugins = false;
+
+    this.backend = wfe.getBackend();
+    // This workflow element only fronts NDB backends, so the cast is
+    // expected to always succeed -- NOTE(review): confirm.
+    BackendImpl ndbBackend = (BackendImpl) backend;
+
+    clientConnection = getClientConnection();
+
+    // Get the plugin config manager that will be used for invoking plugins.
+    PluginConfigManager pluginConfigManager =
+         DirectoryServer.getPluginConfigManager();
+
+    // Check for a request to cancel this operation.
+    checkIfCanceled(false);
+
+    // Create a labeled block of code that we can break out of if a problem is
+    // detected.
+modifyDNProcessing:
+    {
+      // Process the entry DN, newRDN, and newSuperior elements from their raw
+      // forms as provided by the client to the forms required for the rest of
+      // the modify DN processing.
+      entryDN = getEntryDN();
+
+      newRDN = getNewRDN();
+      if (newRDN == null)
+      {
+        break modifyDNProcessing;
+      }
+
+      DN newSuperior = getNewSuperior();
+      // A raw new superior with no parsed value indicates a decode failure;
+      // presumably the response was already populated by getNewSuperior()
+      // -- TODO confirm against the parent class.
+      if ((newSuperior == null) &&
+          (getRawNewSuperior() != null))
+      {
+        break modifyDNProcessing;
+      }
+
+      // Construct the new DN to use for the entry.
+      DN parentDN;
+      if (newSuperior == null)
+      {
+        parentDN = entryDN.getParentDNInSuffix();
+      }
+      else
+      {
+        // Refuse to move an entry beneath its own subtree.
+        if(newSuperior.isDescendantOf(entryDN))
+        {
+          setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+          appendErrorMessage(ERR_MODDN_NEW_SUPERIOR_IN_SUBTREE.get(
+              String.valueOf(entryDN), String.valueOf(newSuperior)));
+          break modifyDNProcessing;
+        }
+        parentDN = newSuperior;
+      }
+
+      if ((parentDN == null) || parentDN.isNullDN())
+      {
+        setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+        appendErrorMessage(ERR_MODDN_NO_PARENT.get(String.valueOf(entryDN)));
+        break modifyDNProcessing;
+      }
+
+      DN newDN = parentDN.concat(newRDN);
+
+      // Get the backend for the current entry, and the backend for the new
+      // entry.  If either is null, or if they are different, then fail.
+      Backend currentBackend = backend;
+      if (currentBackend == null)
+      {
+        setResultCode(ResultCode.NO_SUCH_OBJECT);
+        appendErrorMessage(ERR_MODDN_NO_BACKEND_FOR_CURRENT_ENTRY.get(
+                                String.valueOf(entryDN)));
+        break modifyDNProcessing;
+      }
+
+      Backend newBackend = DirectoryServer.getBackend(newDN);
+      if (newBackend == null)
+      {
+        setResultCode(ResultCode.NO_SUCH_OBJECT);
+        appendErrorMessage(ERR_MODDN_NO_BACKEND_FOR_NEW_ENTRY.get(
+                                String.valueOf(entryDN),
+                                String.valueOf(newDN)));
+        break modifyDNProcessing;
+      }
+      else if (! currentBackend.equals(newBackend))
+      {
+        // Cross-backend moves are not supported.
+        setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+        appendErrorMessage(ERR_MODDN_DIFFERENT_BACKENDS.get(
+                                String.valueOf(entryDN),
+                                String.valueOf(newDN)));
+        break modifyDNProcessing;
+      }
+
+
+      // Check for a request to cancel this operation.
+      checkIfCanceled(false);
+
+      // The NDB transaction spanning the entry read and the rename; it is
+      // always released in the finally block below.
+      AbstractTransaction txn =
+        new AbstractTransaction(ndbBackend.getRootContainer());
+
+      try
+      {
+        // Get the current entry from the appropriate backend.  If it doesn't
+        // exist, then fail.
+        try
+        {
+          // Read without committing, taking an exclusive lock -- presumably
+          // held until the rename completes in this same transaction
+          // (TODO confirm against getEntryNoCommit semantics).
+          currentEntry = ndbBackend.getEntryNoCommit(entryDN, txn,
+            NdbOperation.LockMode.LM_Exclusive);
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break modifyDNProcessing;
+        }
+
+        if (getOriginalEntry() == null)
+        {
+          // See if one of the entry's ancestors exists.
+          // Walk up the tree so the closest existing ancestor can be
+          // reported as the matched DN for the NO_SUCH_OBJECT result.
+          parentDN = entryDN.getParentDNInSuffix();
+          while (parentDN != null)
+          {
+            try
+            {
+              if (DirectoryServer.entryExists(parentDN))
+              {
+                setMatchedDN(parentDN);
+                break;
+              }
+            }
+            catch (Exception e)
+            {
+              // Best effort only: give up on the matched DN but still
+              // return NO_SUCH_OBJECT below.
+              if (debugEnabled())
+              {
+                TRACER.debugCaught(DebugLogLevel.ERROR, e);
+              }
+              break;
+            }
+
+            parentDN = parentDN.getParentDNInSuffix();
+          }
+
+          setResultCode(ResultCode.NO_SUCH_OBJECT);
+          appendErrorMessage(ERR_MODDN_NO_CURRENT_ENTRY.get(
+                                  String.valueOf(entryDN)));
+          break modifyDNProcessing;
+        }
+
+        if(!handleConflictResolution()) {
+            break modifyDNProcessing;
+        }
+
+
+        // Check to see if there are any controls in the request.  If so, then
+        // see if there is any special processing required.
+        try
+        {
+          handleRequestControls();
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break modifyDNProcessing;
+        }
+
+
+        // Check to see if the client has permission to perform the
+        // modify DN.
+
+        // FIXME: for now assume that this will check all permission
+        // pertinent to the operation. This includes proxy authorization
+        // and any other controls specified.
+
+        // FIXME: earlier checks to see if the entry or new superior
+        // already exists may have already exposed sensitive information
+        // to the client.
+        if (! AccessControlConfigManager.getInstance().
+                   getAccessControlHandler().isAllowed(this))
+        {
+          setResultCode(ResultCode.INSUFFICIENT_ACCESS_RIGHTS);
+          appendErrorMessage(ERR_MODDN_AUTHZ_INSUFFICIENT_ACCESS_RIGHTS.get(
+                                  String.valueOf(entryDN)));
+          break modifyDNProcessing;
+        }
+
+        // Duplicate the entry and set its new DN.  Also, create an empty list
+        // to hold the attribute-level modifications.
+        newEntry = currentEntry.duplicate(false);
+        newEntry.setDN(newDN);
+
+        // Initialize the modification list before applying RDN changes.
+        // NOTE(review): passing null here appears to be the parent class's
+        // initialization idiom -- confirm it seeds an empty list rather
+        // than adding a null element.
+        addModification(null);
+        List<Modification> modifications = this.getModifications();
+
+
+
+        // Apply any changes to the entry based on the change in its RDN.  Also,
+        // perform schema checking on the updated entry.
+        try
+        {
+          applyRDNChanges(modifications);
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break modifyDNProcessing;
+        }
+
+
+        // Check for a request to cancel this operation.
+        checkIfCanceled(false);
+
+        // Get a count of the current number of modifications.  The
+        // pre-operation plugins may alter this list, and we need to be able to
+        // identify which changes were made after they're done.
+        int modCount = modifications.size();
+
+
+        // If the operation is not a synchronization operation,
+        // Invoke the pre-operation modify DN plugins.
+        if (! isSynchronizationOperation())
+        {
+          executePostOpPlugins = true;
+          PluginResult.PreOperation preOpResult =
+              pluginConfigManager.invokePreOperationModifyDNPlugins(this);
+          if (!preOpResult.continueProcessing())
+          {
+            // A plugin vetoed the operation; surface its result verbatim.
+            setResultCode(preOpResult.getResultCode());
+            appendErrorMessage(preOpResult.getErrorMessage());
+            setMatchedDN(preOpResult.getMatchedDN());
+            setReferralURLs(preOpResult.getReferralURLs());
+            break modifyDNProcessing;
+          }
+        }
+
+
+        // Check to see if any of the pre-operation plugins made any changes to
+        // the entry.  If so, then apply them.
+        if (modifications.size() > modCount)
+        {
+          try
+          {
+            applyPreOpModifications(modifications, modCount);
+          }
+          catch (DirectoryException de)
+          {
+            if (debugEnabled())
+            {
+              TRACER.debugCaught(DebugLogLevel.ERROR, de);
+            }
+
+            setResponseData(de);
+            break modifyDNProcessing;
+          }
+        }
+
+
+        // Actually perform the modify DN operation.
+        // This should include taking
+        // care of any synchronization that might be needed.
+        try
+        {
+          // If it is not a private backend, then check to see if the server or
+          // backend is operating in read-only mode.
+          if (! currentBackend.isPrivateBackend())
+          {
+            switch (DirectoryServer.getWritabilityMode())
+            {
+              case DISABLED:
+                setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+                appendErrorMessage(ERR_MODDN_SERVER_READONLY.get(
+                                        String.valueOf(entryDN)));
+                break modifyDNProcessing;
+
+              case INTERNAL_ONLY:
+                if (! (isInternalOperation() || isSynchronizationOperation()))
+                {
+                  setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+                  appendErrorMessage(ERR_MODDN_SERVER_READONLY.get(
+                                          String.valueOf(entryDN)));
+                  break modifyDNProcessing;
+                }
+            }
+
+            switch (currentBackend.getWritabilityMode())
+            {
+              case DISABLED:
+                setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+                appendErrorMessage(ERR_MODDN_BACKEND_READONLY.get(
+                                        String.valueOf(entryDN)));
+                break modifyDNProcessing;
+
+              case INTERNAL_ONLY:
+                if (! (isInternalOperation() || isSynchronizationOperation()))
+                {
+                  setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+                  appendErrorMessage(ERR_MODDN_BACKEND_READONLY.get(
+                                          String.valueOf(entryDN)));
+                  break modifyDNProcessing;
+                }
+            }
+          }
+
+
+          if (noOp)
+          {
+            // No-Op control present: report the would-be outcome without
+            // touching the backend.
+            appendErrorMessage(INFO_MODDN_NOOP.get());
+            setResultCode(ResultCode.NO_OPERATION);
+          }
+          else
+          {
+              if(!processPreOperation()) {
+                  break modifyDNProcessing;
+              }
+              // Rename the entry within the shared NDB transaction.
+              ndbBackend.renameEntry(entryDN, newEntry, this, txn);
+          }
+
+
+          // Attach the pre-read and/or post-read controls to the response if
+          // appropriate.
+          processReadEntryControls();
+
+
+          if (! noOp)
+          {
+            setResultCode(ResultCode.SUCCESS);
+          }
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break modifyDNProcessing;
+        }
+      }
+      finally
+      {
+        processSynchPostOperationPlugins();
+        // Release the NDB transaction.  Close failures are logged only so
+        // they cannot mask the real operation result.
+        try {
+          txn.close();
+        } catch (Exception ex) {
+          if (debugEnabled()) {
+            TRACER.debugCaught(DebugLogLevel.ERROR, ex);
+          }
+        }
+      }
+    }
+
+    // Invoke the post-operation or post-synchronization modify DN plugins.
+    if (isSynchronizationOperation())
+    {
+      if (getResultCode() == ResultCode.SUCCESS)
+      {
+        pluginConfigManager.invokePostSynchronizationModifyDNPlugins(this);
+      }
+    }
+    else if (executePostOpPlugins)
+    {
+      PluginResult.PostOperation postOpResult =
+           pluginConfigManager.invokePostOperationModifyDNPlugins(this);
+      if (!postOpResult.continueProcessing())
+      {
+        setResultCode(postOpResult.getResultCode());
+        appendErrorMessage(postOpResult.getErrorMessage());
+        setMatchedDN(postOpResult.getMatchedDN());
+        setReferralURLs(postOpResult.getReferralURLs());
+        // A vetoing post-op plugin also suppresses the change-notification
+        // callback registered below.
+        return;
+      }
+    }
+
+
+    // Register a post-response call-back which will notify persistent
+    // searches and change listeners.
+    if (getResultCode() == ResultCode.SUCCESS)
+    {
+      registerPostResponseCallback(new Runnable()
+      {
+
+        public void run()
+        {
+          // Notify change listeners.
+          for (ChangeNotificationListener changeListener : DirectoryServer
+              .getChangeNotificationListeners())
+          {
+            try
+            {
+              changeListener.handleModifyDNOperation(
+                  NDBModifyDNOperation.this, currentEntry, newEntry);
+            }
+            catch (Exception e)
+            {
+              // A misbehaving listener must not break response processing;
+              // log the failure and keep notifying the remaining listeners.
+              if (debugEnabled())
+              {
+                TRACER.debugCaught(DebugLogLevel.ERROR, e);
+              }
+
+              Message message = ERR_MODDN_ERROR_NOTIFYING_CHANGE_LISTENER
+                  .get(getExceptionMessage(e));
+              logError(message);
+            }
+          }
+        }
+      });
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBModifyOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBModifyOperation.java
new file mode 100644
index 0000000..5748aaf
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBModifyOperation.java
@@ -0,0 +1,529 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.workflowelement.ndb;
+
+
+
+import com.mysql.cluster.ndbj.NdbOperation;
+import org.opends.messages.Message;
+import static org.opends.messages.CoreMessages.*;
+import static org.opends.server.loggers.ErrorLogger.*;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.server.util.ServerConstants.*;
+import static org.opends.server.util.StaticUtils.*;
+
+
+import org.opends.messages.MessageBuilder;
+import org.opends.server.api.ChangeNotificationListener;
+import org.opends.server.api.plugin.PluginResult;
+import org.opends.server.backends.ndb.AbstractTransaction;
+import org.opends.server.backends.ndb.BackendImpl;
+import org.opends.server.controls.PasswordPolicyErrorType;
+import org.opends.server.controls.PasswordPolicyResponseControl;
+import org.opends.server.core.AccessControlConfigManager;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.ModifyOperation;
+import org.opends.server.core.PasswordPolicyState;
+import org.opends.server.core.PluginConfigManager;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.CanceledOperationException;
+import org.opends.server.types.Control;
+import org.opends.server.types.DN;
+import org.opends.server.types.DebugLogLevel;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.ResultCode;
+import org.opends.server.types.operation.PostOperationModifyOperation;
+import org.opends.server.types.operation.PostResponseModifyOperation;
+import org.opends.server.types.operation.PostSynchronizationModifyOperation;
+import org.opends.server.types.operation.PreOperationModifyOperation;
+import org.opends.server.util.TimeThread;
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendModifyOperation;
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendWorkflowElement;
+
+
+
+/**
+ * This class defines an operation used to modify an entry in an NDB backend
+ * of the Directory Server.
+ */
+public class NDBModifyOperation
+       extends LocalBackendModifyOperation
+       implements PreOperationModifyOperation, PostOperationModifyOperation,
+                  PostResponseModifyOperation,
+                  PostSynchronizationModifyOperation
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+
+  /**
+   * Creates a new operation that may be used to modify an entry in an
+   * NDB backend of the Directory Server.
+   *
+   * @param modify The operation to enhance.
+   */
+  public NDBModifyOperation(ModifyOperation modify)
+  {
+    super(modify);
+    NDBWorkflowElement.attachLocalOperation (modify, this);
+  }
+
+
+
+  /**
+   * Process this modify operation against an NDB backend.
+   *
+   * @param  wfe The local backend work-flow element.
+   *
+   * @throws CanceledOperationException if this operation should be
+   * cancelled
+   */
+  @Override
+  public void processLocalModify(final LocalBackendWorkflowElement wfe)
+    throws CanceledOperationException {
+    boolean executePostOpPlugins = false;
+
+    this.backend = wfe.getBackend();
+    BackendImpl ndbBackend = (BackendImpl) backend;
+
+    clientConnection = getClientConnection();
+
+    // Get the plugin config manager that will be used for invoking plugins.
+    PluginConfigManager pluginConfigManager =
+      DirectoryServer.getPluginConfigManager();
+
+    // Check for a request to cancel this operation.
+    checkIfCanceled(false);
+
+    // Create a labeled block of code that we can break out of if a problem is
+    // detected.
+modifyProcessing:
+    {
+      entryDN = getEntryDN();
+      if (entryDN == null){
+        break modifyProcessing;
+      }
+
+      // Process the modifications to convert them from their raw form to the
+      // form required for the rest of the modify processing.
+      modifications = getModifications();
+      if (modifications == null)
+      {
+        break modifyProcessing;
+      }
+
+      if (modifications.isEmpty())
+      {
+        setResultCode(ResultCode.CONSTRAINT_VIOLATION);
+        appendErrorMessage(ERR_MODIFY_NO_MODIFICATIONS.get(
+                                String.valueOf(entryDN)));
+        break modifyProcessing;
+      }
+
+
+      // If the user must change their password before doing anything else, and
+      // if the target of the modify operation isn't the user's own entry, then
+      // reject the request.
+      if ((! isInternalOperation()) && clientConnection.mustChangePassword())
+      {
+        DN authzDN = getAuthorizationDN();
+        if ((authzDN != null) && (! authzDN.equals(entryDN)))
+        {
+          // The user will not be allowed to do anything else before the
+          // password gets changed.  Also note that we haven't yet checked the
+          // request controls so we need to do that now to see if the password
+          // policy request control was provided.
+          for (Control c : getRequestControls())
+          {
+            if (c.getOID().equals(OID_PASSWORD_POLICY_CONTROL))
+            {
+              pwPolicyControlRequested = true;
+              pwpErrorType = PasswordPolicyErrorType.CHANGE_AFTER_RESET;
+              break;
+            }
+          }
+
+          setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+          appendErrorMessage(ERR_MODIFY_MUST_CHANGE_PASSWORD.get());
+          break modifyProcessing;
+        }
+      }
+
+
+      // Check for a request to cancel this operation.
+      checkIfCanceled(false);
+
+      AbstractTransaction txn =
+        new AbstractTransaction(ndbBackend.getRootContainer());
+
+      try
+      {
+        // Check for a request to cancel this operation.
+        checkIfCanceled(false);
+
+
+        try
+        {
+          // Get the entry to modify.  If it does not exist, then fail.
+          currentEntry = ndbBackend.getEntryNoCommit(entryDN, txn,
+            NdbOperation.LockMode.LM_Exclusive);
+
+          if (currentEntry == null)
+          {
+            setResultCode(ResultCode.NO_SUCH_OBJECT);
+            appendErrorMessage(ERR_MODIFY_NO_SUCH_ENTRY.get(
+                String.valueOf(entryDN)));
+
+            // See if one of the entry's ancestors exists.
+            try
+            {
+              DN parentDN = entryDN.getParentDNInSuffix();
+              while (parentDN != null)
+              {
+                if (DirectoryServer.entryExists(parentDN))
+                {
+                  setMatchedDN(parentDN);
+                  break;
+                }
+
+                parentDN = parentDN.getParentDNInSuffix();
+              }
+            }
+            catch (Exception e)
+            {
+              if (debugEnabled())
+              {
+                TRACER.debugCaught(DebugLogLevel.ERROR, e);
+              }
+            }
+
+            break modifyProcessing;
+          }
+
+          // Check to see if there are any controls in the request.  If so, then
+          // see if there is any special processing required.
+          processRequestControls();
+
+          // Get the password policy state object for the entry that can be used
+          // to perform any appropriate password policy processing.  Also, see
+          // if the entry is being updated by the end user or an administrator.
+          selfChange = entryDN.equals(getAuthorizationDN());
+
+          // FIXME -- Need a way to enable debug mode.
+          pwPolicyState = new PasswordPolicyState(currentEntry, false,
+                                                  TimeThread.getTime(), true);
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break modifyProcessing;
+        }
+
+
+        // Create a duplicate of the entry and apply the changes to it.
+        modifiedEntry = currentEntry.duplicate(false);
+
+        if (! noOp)
+        {
+            if(!handleConflictResolution()) {
+                break modifyProcessing;
+            }
+        }
+
+
+        try
+        {
+          handleSchemaProcessing();
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break modifyProcessing;
+        }
+
+
+        // Check to see if the client has permission to perform the modify.
+        // The access control check is not made any earlier because the handler
+        // needs access to the modified entry.
+
+        // FIXME: for now assume that this will check all permissions
+        // pertinent to the operation. This includes proxy authorization
+        // and any other controls specified.
+
+        // FIXME: earlier checks to see if the entry already exists may have
+        // already exposed sensitive information to the client.
+        if (! AccessControlConfigManager.getInstance().
+                   getAccessControlHandler().isAllowed(this))
+        {
+          setResultCode(ResultCode.INSUFFICIENT_ACCESS_RIGHTS);
+          appendErrorMessage(ERR_MODIFY_AUTHZ_INSUFFICIENT_ACCESS_RIGHTS.get(
+                                  String.valueOf(entryDN)));
+          break modifyProcessing;
+        }
+
+
+        try
+        {
+          handleInitialPasswordPolicyProcessing();
+
+          wasLocked = false;
+          if (passwordChanged)
+          {
+            performAdditionalPasswordChangedProcessing();
+          }
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break modifyProcessing;
+        }
+
+
+        if ((! passwordChanged) && (! isInternalOperation()) &&
+            pwPolicyState.mustChangePassword())
+        {
+          // The user will not be allowed to do anything else before the
+          // password gets changed.
+          pwpErrorType = PasswordPolicyErrorType.CHANGE_AFTER_RESET;
+          setResultCode(ResultCode.UNWILLING_TO_PERFORM);
+          appendErrorMessage(ERR_MODIFY_MUST_CHANGE_PASSWORD.get());
+          break modifyProcessing;
+        }
+
+
+        // If the server is configured to check the schema and the
+        // operation is not a synchronization operation,
+        // make sure that the new entry is valid per the server schema.
+        if ((DirectoryServer.checkSchema()) && (! isSynchronizationOperation()))
+        {
+          MessageBuilder invalidReason = new MessageBuilder();
+          if (! modifiedEntry.conformsToSchema(null, false, false, false,
+              invalidReason))
+          {
+            setResultCode(ResultCode.OBJECTCLASS_VIOLATION);
+            appendErrorMessage(ERR_MODIFY_VIOLATES_SCHEMA.get(
+                                    String.valueOf(entryDN), invalidReason));
+            break modifyProcessing;
+          }
+        }
+
+
+        // Check for a request to cancel this operation.
+        checkIfCanceled(false);
+
+        // If the operation is not a synchronization operation,
+        // Invoke the pre-operation modify plugins.
+        if (! isSynchronizationOperation())
+        {
+          executePostOpPlugins = true;
+          PluginResult.PreOperation preOpResult =
+            pluginConfigManager.invokePreOperationModifyPlugins(this);
+          if (!preOpResult.continueProcessing())
+          {
+            setResultCode(preOpResult.getResultCode());
+            appendErrorMessage(preOpResult.getErrorMessage());
+            setMatchedDN(preOpResult.getMatchedDN());
+            setReferralURLs(preOpResult.getReferralURLs());
+            break modifyProcessing;
+          }
+        }
+
+
+        // Actually perform the modify operation.  This should also include
+        // taking care of any synchronization that might be needed.
+        if (backend == null)
+        {
+          setResultCode(ResultCode.NO_SUCH_OBJECT);
+          appendErrorMessage(ERR_MODIFY_NO_BACKEND_FOR_ENTRY.get(
+                                  String.valueOf(entryDN)));
+          break modifyProcessing;
+        }
+
+        try
+        {
+          try
+          {
+            checkWritability();
+          }
+          catch (DirectoryException de)
+          {
+            if (debugEnabled())
+            {
+              TRACER.debugCaught(DebugLogLevel.ERROR, de);
+            }
+
+            setResponseData(de);
+            break modifyProcessing;
+          }
+
+
+          if (noOp)
+          {
+            appendErrorMessage(INFO_MODIFY_NOOP.get());
+            setResultCode(ResultCode.NO_OPERATION);
+          }
+          else
+          {
+              if(!processPreOperation()) {
+                  break modifyProcessing;
+              }
+
+            ndbBackend.replaceEntry(currentEntry, modifiedEntry, this, txn);
+
+
+
+            // See if we need to generate any account status notifications as a
+            // result of the changes.
+            if (passwordChanged || enabledStateChanged || wasLocked)
+            {
+              handleAccountStatusNotifications();
+            }
+          }
+
+
+          // Handle any processing that may be needed for the pre-read and/or
+          // post-read controls.
+          handleReadEntryProcessing();
+
+
+          if (! noOp)
+          {
+            setResultCode(ResultCode.SUCCESS);
+          }
+        }
+        catch (DirectoryException de)
+        {
+          if (debugEnabled())
+          {
+            TRACER.debugCaught(DebugLogLevel.ERROR, de);
+          }
+
+          setResponseData(de);
+          break modifyProcessing;
+        }
+      }
+      finally
+      {
+        processSynchPostOperationPlugins();
+        try {
+          txn.close();
+        } catch (Exception ex) {
+          if (debugEnabled()) {
+            TRACER.debugCaught(DebugLogLevel.ERROR, ex);
+          }
+        }
+      }
+    }
+
+    // If the password policy request control was included, then make sure we
+    // send the corresponding response control.
+    if (pwPolicyControlRequested)
+    {
+      addResponseControl(new PasswordPolicyResponseControl(null, 0,
+                                                           pwpErrorType));
+    }
+
+    // Invoke the post-operation or post-synchronization modify plugins.
+    if (isSynchronizationOperation())
+    {
+      if (getResultCode() == ResultCode.SUCCESS)
+      {
+        pluginConfigManager.invokePostSynchronizationModifyPlugins(this);
+      }
+    }
+    else if (executePostOpPlugins)
+    {
+      // FIXME -- Should this also be done while holding the locks?
+      PluginResult.PostOperation postOpResult =
+           pluginConfigManager.invokePostOperationModifyPlugins(this);
+      if (!postOpResult.continueProcessing())
+      {
+        setResultCode(postOpResult.getResultCode());
+        appendErrorMessage(postOpResult.getErrorMessage());
+        setMatchedDN(postOpResult.getMatchedDN());
+        setReferralURLs(postOpResult.getReferralURLs());
+        return;
+      }
+    }
+
+
+    // Register a post-response call-back which will notify persistent
+    // searches and change listeners.
+    if (getResultCode() == ResultCode.SUCCESS)
+    {
+      registerPostResponseCallback(new Runnable()
+      {
+
+        public void run()
+        {
+          // Notify change listeners.
+          for (ChangeNotificationListener changeListener : DirectoryServer
+              .getChangeNotificationListeners())
+          {
+            try
+            {
+              changeListener
+                  .handleModifyOperation(NDBModifyOperation.this,
+                      currentEntry, modifiedEntry);
+            }
+            catch (Exception e)
+            {
+              if (debugEnabled())
+              {
+                TRACER.debugCaught(DebugLogLevel.ERROR, e);
+              }
+
+              Message message = ERR_MODIFY_ERROR_NOTIFYING_CHANGE_LISTENER
+                  .get(getExceptionMessage(e));
+              logError(message);
+            }
+          }
+        }
+      });
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBSearchOperation.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBSearchOperation.java
new file mode 100644
index 0000000..e37b18f
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBSearchOperation.java
@@ -0,0 +1,437 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.workflowelement.ndb;
+
+
+
+
+import java.util.List;
+import org.opends.server.api.plugin.PluginResult;
+import org.opends.server.controls.LDAPAssertionRequestControl;
+import org.opends.server.controls.MatchedValuesControl;
+import org.opends.server.controls.ProxiedAuthV1Control;
+import org.opends.server.controls.ProxiedAuthV2Control;
+import org.opends.server.core.AccessControlConfigManager;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.PluginConfigManager;
+import org.opends.server.core.SearchOperation;
+import org.opends.server.loggers.debug.DebugTracer;
+import org.opends.server.types.CanceledOperationException;
+import org.opends.server.types.Control;
+import org.opends.server.types.DN;
+import org.opends.server.types.DebugLogLevel;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.Entry;
+import org.opends.server.types.Privilege;
+import org.opends.server.types.ResultCode;
+import org.opends.server.types.SearchFilter;
+import org.opends.server.types.operation.PostOperationSearchOperation;
+import org.opends.server.types.operation.PreOperationSearchOperation;
+import org.opends.server.types.operation.SearchEntrySearchOperation;
+import org.opends.server.types.operation.SearchReferenceSearchOperation;
+
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendSearchOperation;
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendWorkflowElement;
+import static org.opends.messages.CoreMessages.*;
+import static org.opends.server.loggers.debug.DebugLogger.*;
+import static org.opends.server.util.ServerConstants.*;
+import static org.opends.server.util.StaticUtils.*;
+
+
+
+/**
+ * This class defines an operation used to search for entries in an NDB backend
+ * of the Directory Server.
+ */
+public class NDBSearchOperation
+       extends LocalBackendSearchOperation
+       implements PreOperationSearchOperation, PostOperationSearchOperation,
+                  SearchEntrySearchOperation, SearchReferenceSearchOperation
+{
+  /**
+   * The tracer object for the debug logger.
+   */
+  private static final DebugTracer TRACER = getTracer();
+
+
+
+  /**
+   * Creates a new operation that may be used to search for entries in an NDB
+   * backend of the Directory Server.
+   *
+   * @param  search  The operation to process.
+   */
+  public NDBSearchOperation(SearchOperation search)
+  {
+    super(search);
+    NDBWorkflowElement.attachLocalOperation(search, this);
+  }
+
+
+
+  /**
+   * Process this search operation against an NDB backend.
+   *
+   * @param  wfe The local backend work-flow element.
+   *
+   * @throws CanceledOperationException if this operation should be
+   * cancelled
+   */
+  @Override
+  public void processLocalSearch(LocalBackendWorkflowElement wfe)
+    throws CanceledOperationException {
+    boolean executePostOpPlugins = false;
+
+    this.backend = wfe.getBackend();
+
+    clientConnection = getClientConnection();
+
+    // Get the plugin config manager that will be used for invoking plugins.
+    PluginConfigManager pluginConfigManager =
+      DirectoryServer.getPluginConfigManager();
+    processSearch = true;
+
+    // Check for a request to cancel this operation.
+    checkIfCanceled(false);
+
+    // Create a labeled block of code that we can break out of if a problem is
+    // detected.
+searchProcessing:
+    {
+      // Process the search base and filter to convert them from their raw forms
+      // as provided by the client to the forms required for the rest of the
+      // search processing.
+      baseDN = getBaseDN();
+      filter = getFilter();
+
+      if ((baseDN == null) || (filter == null)){
+        break searchProcessing;
+      }
+
+      // Check to see if there are any controls in the request.  If so, then
+      // see if there is any special processing required.
+      try
+      {
+        handleRequestControls();
+      }
+      catch (DirectoryException de)
+      {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, de);
+        }
+
+        setResponseData(de);
+        break searchProcessing;
+      }
+
+
+      // Check to see if the client has permission to perform the
+      // search.
+
+      // FIXME: for now assume that this will check all permissions
+      // pertinent to the operation. This includes proxy authorization
+      // and any other controls specified.
+      if (! AccessControlConfigManager.getInstance().getAccessControlHandler().
+                 isAllowed(this))
+      {
+        setResultCode(ResultCode.INSUFFICIENT_ACCESS_RIGHTS);
+        appendErrorMessage(ERR_SEARCH_AUTHZ_INSUFFICIENT_ACCESS_RIGHTS.get(
+                                String.valueOf(baseDN)));
+        break searchProcessing;
+      }
+
+      // Check for a request to cancel this operation.
+      checkIfCanceled(false);
+
+
+      // Invoke the pre-operation search plugins.
+      executePostOpPlugins = true;
+      PluginResult.PreOperation preOpResult =
+          pluginConfigManager.invokePreOperationSearchPlugins(this);
+      if (!preOpResult.continueProcessing())
+      {
+        setResultCode(preOpResult.getResultCode());
+        appendErrorMessage(preOpResult.getErrorMessage());
+        setMatchedDN(preOpResult.getMatchedDN());
+        setReferralURLs(preOpResult.getReferralURLs());
+        break searchProcessing;
+      }
+
+
+      // Check for a request to cancel this operation.
+      checkIfCanceled(false);
+
+
+      // Get the backend that should hold the search base.  If there is none,
+      // then fail.
+      if (backend == null)
+      {
+        setResultCode(ResultCode.NO_SUCH_OBJECT);
+        appendErrorMessage(ERR_SEARCH_BASE_DOESNT_EXIST.get(
+                                String.valueOf(baseDN)));
+        break searchProcessing;
+      }
+
+
+      // We'll set the result code to "success".  If a problem occurs, then it
+      // will be overwritten.
+      setResultCode(ResultCode.SUCCESS);
+
+
+      // Process the search in the backend and all its subordinates.
+      try
+      {
+        if (processSearch)
+        {
+          backend.search(this);
+        }
+      }
+      catch (DirectoryException de)
+      {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.VERBOSE, de);
+        }
+
+        setResponseData(de);
+
+        break searchProcessing;
+      }
+      catch (CanceledOperationException coe)
+      {
+        throw coe;
+      }
+      catch (Exception e)
+      {
+        if (debugEnabled())
+        {
+          TRACER.debugCaught(DebugLogLevel.ERROR, e);
+        }
+
+        setResultCode(DirectoryServer.getServerErrorResultCode());
+        appendErrorMessage(ERR_SEARCH_BACKEND_EXCEPTION.get(
+                                getExceptionMessage(e)));
+
+        break searchProcessing;
+      }
+    }
+
+
+    // Check for a request to cancel this operation.
+    checkIfCanceled(false);
+
+    // Invoke the post-operation search plugins.
+    if (executePostOpPlugins)
+    {
+      PluginResult.PostOperation postOpResult =
+           pluginConfigManager.invokePostOperationSearchPlugins(this);
+      if (!postOpResult.continueProcessing())
+      {
+        setResultCode(postOpResult.getResultCode());
+        appendErrorMessage(postOpResult.getErrorMessage());
+        setMatchedDN(postOpResult.getMatchedDN());
+        setReferralURLs(postOpResult.getReferralURLs());
+      }
+    }
+  }
+
+
+
+  /**
+   * Handles any controls contained in the request.
+   *
+   * @throws  DirectoryException  If there is a problem with any of the request
+   *                              controls.
+   */
+  @Override
+  protected void handleRequestControls()
+          throws DirectoryException
+  {
+    List<Control> requestControls  = getRequestControls();
+    if ((requestControls != null) && (! requestControls.isEmpty()))
+    {
+      for (int i=0; i < requestControls.size(); i++)
+      {
+        Control c   = requestControls.get(i);
+        String  oid = c.getOID();
+        if (! AccessControlConfigManager.getInstance().
+                   getAccessControlHandler().isAllowed(baseDN, this, c))
+        {
+          throw new DirectoryException(ResultCode.INSUFFICIENT_ACCESS_RIGHTS,
+                         ERR_CONTROL_INSUFFICIENT_ACCESS_RIGHTS.get(oid));
+        }
+
+        if (oid.equals(OID_LDAP_ASSERTION))
+        {
+          LDAPAssertionRequestControl assertControl =
+                getRequestControl(LDAPAssertionRequestControl.DECODER);
+
+          try
+          {
+            // FIXME -- We need to determine whether the current user has
+            //          permission to make this determination.
+            SearchFilter assertionFilter = assertControl.getSearchFilter();
+            Entry entry;
+            try
+            {
+              entry = DirectoryServer.getEntry(baseDN);
+            }
+            catch (DirectoryException de)
+            {
+              if (debugEnabled())
+              {
+                TRACER.debugCaught(DebugLogLevel.ERROR, de);
+              }
+
+              throw new DirectoryException(de.getResultCode(),
+                             ERR_SEARCH_CANNOT_GET_ENTRY_FOR_ASSERTION.get(
+                                  de.getMessageObject()));
+            }
+
+            if (entry == null)
+            {
+              throw new DirectoryException(ResultCode.NO_SUCH_OBJECT,
+                             ERR_SEARCH_NO_SUCH_ENTRY_FOR_ASSERTION.get());
+            }
+
+            if (! assertionFilter.matchesEntry(entry))
+            {
+              throw new DirectoryException(ResultCode.ASSERTION_FAILED,
+                                           ERR_SEARCH_ASSERTION_FAILED.get());
+            }
+          }
+          catch (DirectoryException de)
+          {
+            if (de.getResultCode() == ResultCode.ASSERTION_FAILED)
+            {
+              throw de;
+            }
+
+            if (debugEnabled())
+            {
+              TRACER.debugCaught(DebugLogLevel.ERROR, de);
+            }
+
+            throw new DirectoryException(ResultCode.PROTOCOL_ERROR,
+                           ERR_SEARCH_CANNOT_PROCESS_ASSERTION_FILTER.get(
+                                de.getMessageObject()), de);
+          }
+        }
+        else if (oid.equals(OID_PROXIED_AUTH_V1))
+        {
+          // The requester must have the PROXIED_AUTH privilege in order to be
+          // able to use this control.
+          if (! clientConnection.hasPrivilege(Privilege.PROXIED_AUTH, this))
+          {
+            throw new DirectoryException(ResultCode.AUTHORIZATION_DENIED,
+                           ERR_PROXYAUTH_INSUFFICIENT_PRIVILEGES.get());
+          }
+
+          ProxiedAuthV1Control proxyControl =
+              getRequestControl(ProxiedAuthV1Control.DECODER);
+
+          Entry authorizationEntry = proxyControl.getAuthorizationEntry();
+          setAuthorizationEntry(authorizationEntry);
+          if (authorizationEntry == null)
+          {
+            setProxiedAuthorizationDN(DN.nullDN());
+          }
+          else
+          {
+            setProxiedAuthorizationDN(authorizationEntry.getDN());
+          }
+        }
+        else if (oid.equals(OID_PROXIED_AUTH_V2))
+        {
+          // The requester must have the PROXIED_AUTH privilege in order to be
+          // able to use this control.
+          if (! clientConnection.hasPrivilege(Privilege.PROXIED_AUTH, this))
+          {
+            throw new DirectoryException(ResultCode.AUTHORIZATION_DENIED,
+                           ERR_PROXYAUTH_INSUFFICIENT_PRIVILEGES.get());
+          }
+
+          ProxiedAuthV2Control proxyControl =
+              getRequestControl(ProxiedAuthV2Control.DECODER);
+
+          Entry authorizationEntry = proxyControl.getAuthorizationEntry();
+          setAuthorizationEntry(authorizationEntry);
+          if (authorizationEntry == null)
+          {
+            setProxiedAuthorizationDN(DN.nullDN());
+          }
+          else
+          {
+            setProxiedAuthorizationDN(authorizationEntry.getDN());
+          }
+        }
+        else if (oid.equals(OID_LDAP_SUBENTRIES))
+        {
+          setReturnLDAPSubentries(true);
+        }
+        else if (oid.equals(OID_MATCHED_VALUES))
+        {
+          MatchedValuesControl matchedValuesControl =
+                getRequestControl(MatchedValuesControl.DECODER);
+          setMatchedValuesControl(matchedValuesControl);
+        }
+        else if (oid.equals(OID_ACCOUNT_USABLE_CONTROL))
+        {
+          setIncludeUsableControl(true);
+        }
+        else if (oid.equals(OID_REAL_ATTRS_ONLY))
+        {
+          setRealAttributesOnly(true);
+        }
+        else if (oid.equals(OID_VIRTUAL_ATTRS_ONLY))
+        {
+          setVirtualAttributesOnly(true);
+        }
+        else if (oid.equals(OID_GET_EFFECTIVE_RIGHTS) &&
+          DirectoryServer.isSupportedControl(OID_GET_EFFECTIVE_RIGHTS))
+        {
+          // Do nothing here and let AciHandler deal with it.
+        }
+
+        // NYI -- Add support for additional controls.
+
+        else if (c.isCritical())
+        {
+          if ((backend == null) || (! backend.supportsControl(oid)))
+          {
+            throw new DirectoryException(
+                           ResultCode.UNAVAILABLE_CRITICAL_EXTENSION,
+                           ERR_SEARCH_UNSUPPORTED_CRITICAL_CONTROL.get(oid));
+          }
+        }
+      }
+    }
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBWorkflowElement.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBWorkflowElement.java
new file mode 100644
index 0000000..8110ee2
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/NDBWorkflowElement.java
@@ -0,0 +1,436 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+package org.opends.server.workflowelement.ndb;
+
+
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.TreeMap;
+
+import org.opends.messages.Message;
+import org.opends.server.admin.server.ServerManagementContext;
+import org.opends.server.admin.std.server.BackendCfg;
+import org.opends.server.admin.std.server.LocalBackendWorkflowElementCfg;
+import org.opends.server.admin.std.server.RootCfg;
+import org.opends.server.api.Backend;
+import org.opends.server.config.ConfigException;
+import org.opends.server.core.AddOperation;
+import org.opends.server.core.BindOperation;
+import org.opends.server.core.CompareOperation;
+import org.opends.server.core.DeleteOperation;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.ModifyDNOperation;
+import org.opends.server.core.ModifyOperation;
+import org.opends.server.core.SearchOperation;
+import org.opends.server.types.*;
+import
+  org.opends.server.workflowelement.localbackend.LocalBackendWorkflowElement;
+
+import static org.opends.server.config.ConfigConstants.*;
+
+
+
+/**
+ * This class defines an NDB backend workflow element, i.e. an entity that
+ * handles the processing of an operation against an NDB backend.
+ */
+public class NDBWorkflowElement extends LocalBackendWorkflowElement
+{
+  // The backend associated with the NDB workflow element.
+  private Backend backend;
+
+
+  // The set of NDB backend workflow elements registered with the server.
+  // Copy-on-write: writers (under registeredNDBBackendsLock) replace the
+  // whole map rather than mutating it in place, so readers always observe
+  // a consistent snapshot.
+  private static TreeMap<String, NDBWorkflowElement>
+       registeredNDBBackends =
+            new TreeMap<String, NDBWorkflowElement>();
+
+
+  // The lock to guarantee safe concurrent access to the
+  // registeredNDBBackends variable.
+  private static final Object registeredNDBBackendsLock = new Object();
+
+
+  // The string indicating the type of the workflow element.
+  // Constant for the class, so declared static.
+  private static final String BACKEND_WORKFLOW_ELEMENT = "Backend";
+
+
+  /**
+   * Creates a new instance of the NDB backend workflow element.
+   */
+  public NDBWorkflowElement()
+  {
+    // There is nothing to do in this constructor.
+  }
+
+
+  /**
+   * Initializes a new instance of the NDB backend workflow element.
+   * This method is intended to be called by DirectoryServer when
+   * workflow configuration mode is auto as opposed to
+   * initializeWorkflowElement which is invoked when workflow
+   * configuration mode is manual.
+   *
+   * @param workflowElementID  the workflow element identifier
+   * @param backend  the backend associated to that workflow element
+   */
+  private void initialize(String workflowElementID, Backend backend)
+  {
+    // Initialize the workflow ID
+    super.initialize(workflowElementID, BACKEND_WORKFLOW_ELEMENT);
+
+    this.backend  = backend;
+
+    // Mirror the backend's privacy setting so private backends are not
+    // exposed through this workflow element.
+    if (this.backend != null)
+    {
+      setPrivate(this.backend.isPrivateBackend());
+    }
+  }
+
+
+  /**
+   * Initializes a new instance of the NDB backend workflow element.
+   * This method is intended to be called by DirectoryServer when
+   * workflow configuration mode is manual as opposed to
+   * initialize(String,Backend) which is invoked when workflow
+   * configuration mode is auto.
+   *
+   * @param  configuration  The configuration for this NDB backend
+   *                        workflow element.
+   *
+   * @throws  ConfigException  If there is a problem with the provided
+   *                           configuration.
+   *
+   * @throws  InitializationException  If an error occurs while trying
+   *                                   to initialize this workflow
+   *                                   element that is not related to
+   *                                   the provided configuration.
+   */
+  @Override
+  public void initializeWorkflowElement(
+      LocalBackendWorkflowElementCfg configuration
+      ) throws ConfigException, InitializationException
+  {
+    // Listen for subsequent changes to this configuration entry.
+    configuration.addLocalBackendChangeListener(this);
+
+    // Read configuration and apply changes.
+    processWorkflowElementConfig(configuration, true);
+  }
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void finalizeWorkflowElement()
+  {
+    // null all fields so that any use of the finalized object will raise
+    // an NPE
+    super.initialize(null, null);
+    backend = null;
+  }
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public boolean isConfigurationChangeAcceptable(
+      LocalBackendWorkflowElementCfg configuration,
+      List<Message>                  unacceptableReasons
+      )
+  {
+    // Dry-run the configuration processing without applying it.
+    return processWorkflowElementConfig(configuration, false);
+  }
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public ConfigChangeResult applyConfigurationChange(
+      LocalBackendWorkflowElementCfg configuration
+      )
+  {
+    // Returned result.
+    ConfigChangeResult changeResult = new ConfigChangeResult(
+        ResultCode.SUCCESS, false, new ArrayList<Message>()
+        );
+
+    processWorkflowElementConfig(configuration, true);
+
+    return changeResult;
+  }
+
+
+  /**
+   * Parses the provided configuration and configure the workflow element.
+   *
+   * @param configuration  The new configuration containing the changes.
+   * @param applyChanges   If true then take into account the new configuration.
+   *
+   * @return  <code>true</code> if the configuration is acceptable.
+   */
+  private boolean processWorkflowElementConfig(
+      LocalBackendWorkflowElementCfg configuration,
+      boolean                        applyChanges
+      )
+  {
+    // returned status; no validation is performed here, so the
+    // configuration is always reported as acceptable.
+    boolean isAcceptable = true;
+
+    // If the workflow element is disabled then do nothing. Note that the
+    // configuration manager could have finalized the object right before.
+    if (configuration.isEnabled())
+    {
+      // Read configuration.
+      String newBackendID = configuration.getBackend();
+      Backend newBackend  = DirectoryServer.getBackend(newBackendID);
+
+      // If the backend is null (i.e. not found in the list of
+      // registered backends, this is probably because we are looking
+      // for the config backend
+      if (newBackend == null) {
+        ServerManagementContext context = ServerManagementContext.getInstance();
+        RootCfg root = context.getRootConfiguration();
+        try {
+          BackendCfg backendCfg = root.getBackend(newBackendID);
+          if (backendCfg.getBaseDN().contains(DN.decode(DN_CONFIG_ROOT))) {
+            newBackend = DirectoryServer.getConfigHandler();
+          }
+        } catch (Exception ex) {
+          // Unable to find the backend; leave newBackend null so the
+          // workflow element ends up with no backend attached.
+          newBackend = null;
+        }
+      }
+
+      // Apply the new configuration.
+      if (applyChanges)
+      {
+        super.initialize(
+          configuration.getWorkflowElementId(), BACKEND_WORKFLOW_ELEMENT);
+        backend = newBackend;
+      }
+    }
+
+    return isAcceptable;
+  }
+
+
+  /**
+   * Creates and registers a NDB backend with the server.
+   *
+   * @param workflowElementID  the identifier of the workflow element to create
+   * @param backend            the backend to associate with the NDB backend
+   *                           workflow element
+   *
+   * @return the existing NDB backend workflow element if it was
+   *         already created or a newly created NDB backend workflow
+   *         element.
+   */
+  public static NDBWorkflowElement createAndRegister(
+      String workflowElementID,
+      Backend backend)
+  {
+    // The lookup, creation and registration must happen atomically:
+    // checking outside the lock would let two racing threads each build
+    // an element, with one caller getting an unregistered instance.
+    synchronized (registeredNDBBackendsLock)
+    {
+      NDBWorkflowElement ndbBackend =
+          registeredNDBBackends.get(workflowElementID);
+      if (ndbBackend == null)
+      {
+        ndbBackend = new NDBWorkflowElement();
+        ndbBackend.initialize(workflowElementID, backend);
+
+        // store the new NDB backend in the list of registered backends
+        registerNDBBackend(ndbBackend);
+      }
+
+      return ndbBackend;
+    }
+  }
+
+
+
+  /**
+   * Removes a NDB backend that was registered with the server.
+   *
+   * @param workflowElementID  the identifier of the workflow element to remove
+   */
+  public static void remove(String workflowElementID)
+  {
+    deregisterNDBBackend(workflowElementID);
+  }
+
+
+
+  /**
+   * Removes all the NDB backends that were registered with the server.
+   * This function is intended to be called when the server is shutting down.
+   */
+  public static void removeAll()
+  {
+    synchronized (registeredNDBBackendsLock)
+    {
+      // Replace the map wholesale instead of deregistering entries one by
+      // one: each per-entry removal copies the entire map, which is
+      // quadratic work for a complete clear.  The end state is identical.
+      registeredNDBBackends = new TreeMap<String, NDBWorkflowElement>();
+    }
+  }
+
+
+
+  /**
+   * Registers a NDB backend with the server.
+   *
+   * @param ndbBackend  the NDB backend to register with the server
+   */
+  private static void registerNDBBackend(
+                           NDBWorkflowElement ndbBackend)
+  {
+    synchronized (registeredNDBBackendsLock)
+    {
+      String ndbBackendID = ndbBackend.getWorkflowElementID();
+
+      // Only register if no element with that ID exists yet; publish the
+      // change by swapping in a fresh copy of the map.
+      if (registeredNDBBackends.get(ndbBackendID) == null)
+      {
+        TreeMap<String, NDBWorkflowElement> newNDBBackends =
+          new TreeMap
+            <String, NDBWorkflowElement>(registeredNDBBackends);
+        newNDBBackends.put(ndbBackendID, ndbBackend);
+        registeredNDBBackends = newNDBBackends;
+      }
+    }
+  }
+
+
+
+  /**
+   * Deregisters a NDB backend with the server.
+   *
+   * @param workflowElementID  the identifier of the workflow element to remove
+   */
+  private static void deregisterNDBBackend(String workflowElementID)
+  {
+    synchronized (registeredNDBBackendsLock)
+    {
+      // Only copy the map when the element is actually present.
+      if (registeredNDBBackends.get(workflowElementID) != null)
+      {
+        TreeMap<String, NDBWorkflowElement> newNDBBackends =
+             new TreeMap<String, NDBWorkflowElement>(
+                      registeredNDBBackends);
+        newNDBBackends.remove(workflowElementID);
+        registeredNDBBackends = newNDBBackends;
+      }
+    }
+  }
+
+
+
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public void execute(Operation operation) throws CanceledOperationException {
+    // Dispatch the generic operation to the NDB-specific implementation
+    // for its operation type.
+    switch (operation.getOperationType())
+    {
+      case BIND:
+        NDBBindOperation bindOperation =
+             new NDBBindOperation((BindOperation) operation);
+        bindOperation.processLocalBind(this);
+        break;
+
+      case SEARCH:
+        NDBSearchOperation searchOperation =
+             new NDBSearchOperation((SearchOperation) operation);
+        searchOperation.processLocalSearch(this);
+        break;
+
+      case ADD:
+        NDBAddOperation addOperation =
+             new NDBAddOperation((AddOperation) operation);
+        addOperation.processLocalAdd(this);
+        break;
+
+      case DELETE:
+        NDBDeleteOperation deleteOperation =
+             new NDBDeleteOperation((DeleteOperation) operation);
+        deleteOperation.processLocalDelete(this);
+        break;
+
+      case MODIFY:
+        NDBModifyOperation modifyOperation =
+             new NDBModifyOperation((ModifyOperation) operation);
+        modifyOperation.processLocalModify(this);
+        break;
+
+      case MODIFY_DN:
+        NDBModifyDNOperation modifyDNOperation =
+             new NDBModifyDNOperation((ModifyDNOperation) operation);
+        modifyDNOperation.processLocalModifyDN(this);
+        break;
+
+      case COMPARE:
+        NDBCompareOperation compareOperation =
+             new NDBCompareOperation((CompareOperation) operation);
+        compareOperation.processLocalCompare(this);
+        break;
+
+      case ABANDON:
+        // There is no processing for an abandon operation.
+        break;
+
+      default:
+        throw new AssertionError("Attempted to execute an invalid operation " +
+                                 "type:  " + operation.getOperationType() +
+                                 " (" + operation + ")");
+    }
+  }
+
+
+
+  /**
+   * Gets the backend associated with this NDB backend workflow element.
+   *
+   * @return The backend associated with this NDB backend workflow
+   *         element.
+   */
+  @Override
+  public Backend getBackend()
+  {
+    return backend;
+  }
+}
diff --git a/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/package-info.java b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/package-info.java
new file mode 100644
index 0000000..73e36c5
--- /dev/null
+++ b/opendj-sdk/opends/src/server/org/opends/server/workflowelement/ndb/package-info.java
@@ -0,0 +1,36 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE
+ * or https://OpenDS.dev.java.net/OpenDS.LICENSE.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at
+ * trunk/opends/resource/legal-notices/OpenDS.LICENSE.  If applicable,
+ * add the following below this CDDL HEADER, with the fields enclosed
+ * by brackets "[]" replaced with your own identifying information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2008-2009 Sun Microsystems, Inc.
+ */
+
+
+
+/**
+ * This package contains source for the NDB backend workflow element, which
+ * is used to process operations against data stored in an NDB backend
+ * database.
+ */
+@org.opends.server.types.PublicAPI(
+     stability=org.opends.server.types.StabilityLevel.PRIVATE)
+package org.opends.server.workflowelement.ndb;

--
Gitblit v1.10.0