From caa38c1354824a2da50a8fbc8fc85ba1b0dfc7fe Mon Sep 17 00:00:00 2001
From: Jean-Noël Rouvignac <jean-noel.rouvignac@forgerock.com>
Date: Tue, 06 Oct 2015 14:43:56 +0000
Subject: [PATCH] OPENDJ-1719 Consider migrating JE backend to new PluggableBackend framework

---
 opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java              |    2 
 opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Importer.java                       |    5 
 opendj-server-legacy/src/test/java/org/opends/server/backends/pluggable/DN2IDTest.java                          |    3 
 opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEBackend.java                                |   52 +
 opendj-maven-plugin/src/main/resources/config/stylesheets/abbreviations.xsl                                     |    3 
 opendj-server-legacy/resource/schema/02-config.ldif                                                             |   29 
 opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Storage.java                        |   11 
 opendj-server-legacy/src/test/java/org/opends/server/authorization/dseecompat/ReferencesTestCase.java           |    4 
 opendj-server-legacy/src/main/java/org/opends/server/tasks/RebuildTask.java                                     |    2 
 opendj-server-legacy/src/test/java/org/opends/server/backends/jeb/JebTestCase.java                              |   16 
 opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/BackendMonitor.java                     |  132 +-
 opendj-server-legacy/resource/admin/abbreviations.xsl                                                           |    3 
 opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBMonitor.java                               |   36 
 opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEMonitor.java                                |  122 ++
 opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBStorage.java                               |   24 
 opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEStorage.java                                | 1375 +++++++++++++++++++++++++++
 opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/JEBackendConfiguration.xml |  745 ++++++++++++++
 opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java                  |  340 +++---
 opendj-server-legacy/src/main/java/org/opends/server/api/MonitorProvider.java                                   |   62 
 19 files changed, 2649 insertions(+), 317 deletions(-)

diff --git a/opendj-maven-plugin/src/main/resources/config/stylesheets/abbreviations.xsl b/opendj-maven-plugin/src/main/resources/config/stylesheets/abbreviations.xsl
index 341c889..f323b11 100644
--- a/opendj-maven-plugin/src/main/resources/config/stylesheets/abbreviations.xsl
+++ b/opendj-maven-plugin/src/main/resources/config/stylesheets/abbreviations.xsl
@@ -54,7 +54,8 @@
               or $value = 'des' or $value = 'aes' or $value = 'rc4'
               or $value = 'db' or $value = 'snmp' or $value = 'qos'
               or $value = 'ecl' or $value = 'ttl' or $value = 'jpeg'
-              or $value = 'pbkdf2' or $value = 'pkcs5s2' or $value = 'pdb'
+              or $value = 'pbkdf2' or $value = 'pkcs5s2'
+              or $value = 'je' or $value = 'pdb'
              "/>
   </xsl:template>
 </xsl:stylesheet>
diff --git a/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/JEBackendConfiguration.xml b/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/JEBackendConfiguration.xml
new file mode 100644
index 0000000..eea1d16
--- /dev/null
+++ b/opendj-maven-plugin/src/main/resources/config/xml/org/forgerock/opendj/server/config/JEBackendConfiguration.xml
@@ -0,0 +1,745 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ! CDDL HEADER START
+  !
+  ! The contents of this file are subject to the terms of the
+  ! Common Development and Distribution License, Version 1.0 only
+  ! (the "License").  You may not use this file except in compliance
+  ! with the License.
+  !
+  ! You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
+  ! or http://forgerock.org/license/CDDLv1.0.html.
+  ! See the License for the specific language governing permissions
+  ! and limitations under the License.
+  !
+  ! When distributing Covered Code, include this CDDL HEADER in each
+  ! file and include the License file at legal-notices/CDDLv1_0.txt.
+  ! If applicable, add the following below this CDDL HEADER, with the
+  ! fields enclosed by brackets "[]" replaced with your own identifying
+  ! information:
+  !      Portions Copyright [yyyy] [name of copyright owner]
+  !
+  ! CDDL HEADER END
+  !
+  !
+  !      Copyright 2007-2010 Sun Microsystems, Inc.
+  !      Portions Copyright 2010-2015 ForgeRock AS.
+  ! -->
+<adm:managed-object name="je-backend" plural-name="je-backends"
+  package="org.forgerock.opendj.server.config"
+  extends="pluggable-backend" xmlns:adm="http://opendj.forgerock.org/admin"
+  xmlns:ldap="http://opendj.forgerock.org/admin-ldap"
+  xmlns:cli="http://opendj.forgerock.org/admin-cli">
+  <adm:synopsis>
+    A <adm:user-friendly-name/> stores application
+    data in a Berkeley DB Java Edition database.
+  </adm:synopsis>
+  <adm:description>
+    It is the traditional "directory server" backend and is similar to
+    the backends provided by the Sun Java System Directory Server. The
+    <adm:user-friendly-name />
+    stores the entries in an encoded form and also provides indexes that
+    can be used to quickly locate target entries based on different
+    kinds of criteria.
+  </adm:description>
+  <adm:constraint>
+    <adm:synopsis>
+      The properties db-txn-no-sync and db-txn-write-no-sync are
+      mutually exclusive and cannot both be set at the same time.
+    </adm:synopsis>
+    <adm:condition>
+      <adm:implies>
+        <adm:contains property="enabled" value="true" />
+        <adm:not>
+          <adm:and>
+            <adm:contains property="db-txn-no-sync" value="true" />
+            <adm:contains property="db-txn-write-no-sync" value="true" />
+          </adm:and>
+        </adm:not>
+      </adm:implies>
+    </adm:condition>
+  </adm:constraint>
+  <adm:profile name="ldap">
+    <ldap:object-class>
+      <ldap:name>ds-cfg-je-backend</ldap:name>
+      <ldap:superior>ds-cfg-pluggable-backend</ldap:superior>
+    </ldap:object-class>
+  </adm:profile>
+  <adm:property-override name="java-class" advanced="true">
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>
+          org.opends.server.backends.jeb.JEBackend
+        </adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+  </adm:property-override>
+  <adm:property name="db-directory" mandatory="true">
+    <adm:TODO>Default this to the db/backend-id</adm:TODO>
+    <adm:synopsis>
+      Specifies the path to the filesystem directory that is used
+      to hold the Berkeley DB Java Edition database files containing the
+      data for this backend.
+    </adm:synopsis>
+    <adm:description>
+      The path may be either an absolute path or a path relative to the
+      directory containing the base of the <adm:product-name /> directory server
+      installation. The path may be any valid directory path in which
+      the server has appropriate permissions to read and write files and
+      has sufficient space to hold the database contents.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>db</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-directory</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-directory-permissions" advanced="true">
+    <adm:synopsis>
+      Specifies the permissions that should be applied to the directory
+      containing the server database files.
+    </adm:synopsis>
+    <adm:description>
+      They should be expressed as three-digit octal values, which is the
+      traditional representation for UNIX file permissions. The three
+      digits represent the permissions that are available for the
+      directory's owner, group members, and other users (in that order),
+      and each digit is the octal representation of the read, write, and
+      execute bits. Note that this only impacts permissions on the
+      database directory and not on the files written into that
+      directory. On UNIX systems, the user's umask controls
+      permissions given to the database files.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:server-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>700</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string>
+        <adm:pattern>
+          <adm:regex>^7[0-7][0-7]$</adm:regex>
+          <adm:usage>MODE</adm:usage>
+          <adm:synopsis>
+            Any octal value between 700 and 777 (the owner must always
+            have read, write, and execute permissions on the directory).
+          </adm:synopsis>
+        </adm:pattern>
+      </adm:string>
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-directory-permissions</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-cache-percent">
+    <adm:synopsis>
+      Specifies the percentage of JVM memory to allocate to the database cache.
+    </adm:synopsis>
+    <adm:description>
+      Specifies the percentage of memory available to the JVM that
+      should be used for caching database contents. Note that this is
+      only used if the value of the db-cache-size property is set to
+      "0 MB". Otherwise, the value of that property is used instead
+      to control the cache size configuration.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>50</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" upper-limit="90" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-cache-percent</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-cache-size">
+    <adm:synopsis>
+      The amount of JVM memory to allocate to the database cache.
+    </adm:synopsis>
+    <adm:description>
+      Specifies the amount of memory that should be used for caching
+      database contents. A value of "0 MB" indicates that the
+      db-cache-percent property should be used instead to specify the
+      cache size.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>0 MB</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:size lower-limit="0 MB" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-cache-size</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-cleaner-min-utilization" advanced="true">
+    <adm:synopsis>
+      Specifies the minimum percentage of "live" data that the database
+      cleaner attempts to keep in database log files.
+    </adm:synopsis>
+    <adm:description>
+      If the amount of live data in any database log file drops below
+      this percentage, then the cleaner moves the remaining live
+      data in that file to the end of the database and deletes the
+      original file in order to keep the database relatively compact.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>50</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="0" upper-limit="90" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-cleaner-min-utilization</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-run-cleaner" advanced="true">
+    <adm:synopsis>
+      Indicates whether the database cleaner threads should be
+      enabled.
+    </adm:synopsis>
+    <adm:description>
+      The cleaner threads are used to periodically compact the
+      database by identifying database files with a low (that is, less than
+      the amount specified by the db-cleaner-min-utilization property)
+      percentage of live data, moving the remaining live data to the end
+      of the log and deleting that file.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>true</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:boolean />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-run-cleaner</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-evictor-lru-only" advanced="true">
+    <adm:synopsis>
+      Indicates whether the database should evict existing data from the
+      cache based on an LRU policy (where the least recently used
+      information will be evicted first).
+    </adm:synopsis>
+    <adm:description>
+      If set to "false", then the eviction keeps internal nodes of the underlying
+      Btree in the cache over leaf nodes, even if the leaf nodes have
+      been accessed more recently. This may be a better configuration
+      for databases in which only a very small portion of the data is
+      cached.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>false</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:boolean />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-evictor-lru-only</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-evictor-nodes-per-scan" advanced="true">
+    <adm:synopsis>
+      Specifies the number of Btree nodes that should be evicted from
+      the cache in a single pass if it is determined that it is
+      necessary to free existing data in order to make room for new
+      information.
+    </adm:synopsis>
+    <adm:description>
+      Changes to this property do not take effect until the backend is
+      restarted. It is recommended that you also change this property
+      when you set db-evictor-lru-only to false. This setting controls
+      the number of Btree nodes that are considered, or sampled, each
+      time a node is evicted. A setting of 10 often produces good
+      results, but this may vary from application to application. The
+      larger the nodes per scan, the more accurate the algorithm.
+      However, don't set it too high. When considering larger numbers of
+      nodes for each eviction, the evictor may delay the completion of a
+      given database operation, which impacts the response time of the
+      application thread. In JE 4.1 and later, setting this value too high
+      in an application that is largely CPU bound can reduce the
+      effectiveness of cache eviction. It's best to start with the default
+      value, and increase it gradually to see if it is beneficial for your
+      application.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>10</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" upper-limit="1000" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-evictor-nodes-per-scan</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-evictor-core-threads" advanced="true">
+    <adm:synopsis>
+      Specifies the core number of threads in the eviction thread pool.
+    </adm:synopsis>
+    <adm:description>
+      Specifies the core number of threads in the eviction thread pool.
+      These threads help keep memory usage within cache bounds,
+      offloading work from application threads. db-evictor-core-threads,
+      db-evictor-max-threads and db-evictor-keep-alive are used to configure
+      the core, max and keepalive attributes for the eviction thread pool.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>1</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="0" upper-limit="2147483647" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-evictor-core-threads</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-evictor-max-threads" advanced="true">
+    <adm:synopsis>
+      Specifies the maximum number of threads in the eviction thread pool.
+    </adm:synopsis>
+    <adm:description>
+      Specifies the maximum number of threads in the eviction thread pool.
+      These threads help keep memory usage within cache bounds,
+      offloading work from application threads. db-evictor-core-threads,
+      db-evictor-max-threads and db-evictor-keep-alive are used to configure
+      the core, max and keepalive attributes for the eviction thread pool.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>10</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" upper-limit="2147483647" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-evictor-max-threads</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-evictor-keep-alive" advanced="true">
+    <adm:synopsis>
+      The duration that excess threads in the eviction thread pool will
+      stay idle. After this period, idle threads will terminate.
+    </adm:synopsis>
+    <adm:description>
+      The duration that excess threads in the eviction thread pool will
+      stay idle. After this period, idle threads will terminate.
+      db-evictor-core-threads, db-evictor-max-threads and
+      db-evictor-keep-alive are used to configure the core, max and
+      keepalive attributes for the eviction thread pool.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>600s</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:duration base-unit="s" lower-limit="1" upper-limit="86400" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-evictor-keep-alive</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-log-file-max" advanced="true">
+    <adm:synopsis>
+      Specifies the maximum size for a database log file.
+    </adm:synopsis>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>100mb</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:size lower-limit="1mb" upper-limit="4gib" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-log-file-max</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-log-filecache-size" advanced="true">
+    <adm:synopsis>
+      Specifies the size of the file handle cache.
+    </adm:synopsis>
+    <adm:description>
+      The file handle cache is used to keep as many open log files
+      as possible. When the cache is smaller than the number of logs,
+      the database needs to close some handles and open the log files it needs,
+      resulting in less optimal performance. Ideally, the size of the cache
+      should be higher than the number of files contained in the database.
+      Make sure the OS number of open files per process is also tuned
+      appropriately.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>100</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="3" upper-limit="2147483647" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-log-filecache-size</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-logging-file-handler-on" advanced="true">
+    <adm:synopsis>
+      Indicates whether the database should maintain a je.info file in
+      the same directory as the database log directory.
+    </adm:synopsis>
+    <adm:description>
+      This file contains information about the internal processing
+      performed by the underlying database.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>true</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:boolean />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-logging-file-handler-on</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-logging-level" advanced="true">
+    <adm:TODO>Use an enumeration</adm:TODO>
+    <adm:synopsis>
+      Specifies the log level that should be used by the database
+      when it is writing information into the je.info file.
+    </adm:synopsis>
+    <adm:description>
+      The database trace logging level is (in increasing order of
+      verbosity) chosen from: OFF, SEVERE, WARNING, INFO, CONFIG, FINE,
+      FINER, FINEST, ALL.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>CONFIG</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-logging-level</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-checkpointer-bytes-interval" advanced="true">
+    <adm:synopsis>
+      Specifies the maximum number of bytes that may be written to the
+      database before it is forced to perform a checkpoint.
+    </adm:synopsis>
+    <adm:description>
+      This can be used to bound the recovery time that may be required
+      if the database environment is opened without having been properly
+      closed. If this property is set to a non-zero value, the
+      checkpointer wakeup interval is not used. To use time-based
+      checkpointing, set this property to zero.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:server-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>500mb</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:size lower-limit="0b" upper-limit="9223372036854775807b" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-checkpointer-bytes-interval</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-checkpointer-wakeup-interval"
+    advanced="true">
+    <adm:synopsis>
+      Specifies the maximum length of time that may pass between
+      checkpoints.
+    </adm:synopsis>
+    <adm:description>
+      Note that this is only used if the value of the checkpointer
+      bytes interval is zero.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>30s</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:duration base-unit="s" lower-limit="1" upper-limit="4294" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-checkpointer-wakeup-interval</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-num-lock-tables" advanced="true">
+    <adm:synopsis>
+      Specifies the number of lock tables that are used by the underlying database.
+    </adm:synopsis>
+    <adm:description>
+      This can be particularly important to help improve scalability by
+      avoiding contention on systems with large numbers of CPUs. The
+      value of this configuration property should be set to a prime
+      number that is less than or equal to the number of worker threads
+      configured for use in the server.
+    </adm:description>
+    <adm:requires-admin-action>
+      <adm:component-restart />
+    </adm:requires-admin-action>
+    <adm:default-behavior>
+      <adm:alias>
+        <adm:synopsis>
+          Let the server decide.
+        </adm:synopsis>
+      </adm:alias>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" upper-limit="32767" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-num-lock-tables</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-num-cleaner-threads" advanced="true">
+    <adm:synopsis>
+      Specifies the number of threads that the backend should maintain
+      to keep the database log files at or near the desired utilization.
+    </adm:synopsis>
+    <adm:description>
+      In environments with high write throughput, multiple cleaner
+      threads may be required to maintain the desired utilization.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:alias>
+        <adm:synopsis>
+          Let the server decide.
+        </adm:synopsis>
+      </adm:alias>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:integer lower-limit="1" />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-num-cleaner-threads</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-txn-no-sync" advanced="true">
+    <adm:synopsis>
+      Indicates whether database writes should be primarily written to
+      an internal buffer but not immediately written to disk.
+    </adm:synopsis>
+    <adm:description>
+      Setting the value of this configuration attribute to "true" may
+      improve write performance but could cause the most
+      recent changes to be lost if the <adm:product-name /> directory server or the
+      underlying JVM exits abnormally, or if an OS or hardware failure
+      occurs (a behavior similar to running with transaction durability
+      disabled in the Sun Java System Directory Server).
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>false</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:boolean />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-txn-no-sync</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="db-txn-write-no-sync" advanced="true">
+    <adm:synopsis>
+      Indicates whether the database should synchronously flush data as
+      it is written to disk.
+    </adm:synopsis>
+    <adm:description>
+      If this value is set to "false", then all data written to disk
+      is synchronously flushed to persistent storage and thereby
+      providing full durability. If it is set to "true", then data may
+      be cached for a period of time by the underlying operating system
+      before actually being written to disk. This may improve
+      performance, but could cause the most recent
+      changes to be lost in the event of an underlying OS or hardware
+      failure (but not in the case that the <adm:product-name /> directory server or
+      the JVM exits abnormally).
+    </adm:description>
+    <adm:default-behavior>
+      <adm:defined>
+        <adm:value>true</adm:value>
+      </adm:defined>
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:boolean />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-db-txn-write-no-sync</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+  <adm:property name="disk-low-threshold" advanced="true">
+      <adm:synopsis>
+        Low disk threshold to limit database updates
+      </adm:synopsis>
+      <adm:description>
+        Specifies the "low" free space on the disk. When the available
+        free space on the disk used by this database instance falls below the
+        value specified, protocol updates on this database are permitted only
+        by a user with the BYPASS_LOCKDOWN privilege.
+      </adm:description>
+      <adm:default-behavior>
+          <adm:defined>
+              <adm:value>200 megabytes</adm:value>
+          </adm:defined>
+      </adm:default-behavior>
+      <adm:syntax>
+          <adm:size lower-limit="0" />
+      </adm:syntax>
+      <adm:profile name="ldap">
+          <ldap:attribute>
+              <ldap:name>ds-cfg-disk-low-threshold</ldap:name>
+          </ldap:attribute>
+      </adm:profile>
+  </adm:property>
+  <adm:property name="disk-full-threshold" advanced="true">
+      <adm:synopsis>
+        Full disk threshold to limit database updates
+      </adm:synopsis>
+      <adm:description>
+        When the available free space on the disk used by this database
+        instance falls below the value specified, no updates
+        are permitted and the server returns an UNWILLING_TO_PERFORM error.
+        Updates are allowed again as soon as free space rises above the
+        threshold.
+      </adm:description>
+      <adm:default-behavior>
+          <adm:defined>
+              <adm:value>100 megabytes</adm:value>
+          </adm:defined>
+      </adm:default-behavior>
+      <adm:syntax>
+          <adm:size lower-limit="0" />
+      </adm:syntax>
+      <adm:profile name="ldap">
+          <ldap:attribute>
+              <ldap:name>ds-cfg-disk-full-threshold</ldap:name>
+          </ldap:attribute>
+      </adm:profile>
+  </adm:property>
+  <adm:property name="je-property" advanced="true"
+    multi-valued="true">
+    <adm:synopsis>
+      Specifies the database and environment properties for the Berkeley
+      DB Java Edition database serving the data for this backend.
+    </adm:synopsis>
+    <adm:description>
+      Any Berkeley DB Java Edition property can be specified using the
+      following form: property-name=property-value. Refer to <adm:product-name />
+      documentation for further information on related properties, their
+      implications, and range values. The definitive identification of
+      all the property parameters is available in the example.properties
+      file of Berkeley DB Java Edition distribution.
+    </adm:description>
+    <adm:default-behavior>
+      <adm:undefined />
+    </adm:default-behavior>
+    <adm:syntax>
+      <adm:string />
+    </adm:syntax>
+    <adm:profile name="ldap">
+      <ldap:attribute>
+        <ldap:name>ds-cfg-je-property</ldap:name>
+      </ldap:attribute>
+    </adm:profile>
+  </adm:property>
+</adm:managed-object>
diff --git a/opendj-server-legacy/resource/admin/abbreviations.xsl b/opendj-server-legacy/resource/admin/abbreviations.xsl
index 341c889..f323b11 100644
--- a/opendj-server-legacy/resource/admin/abbreviations.xsl
+++ b/opendj-server-legacy/resource/admin/abbreviations.xsl
@@ -54,7 +54,8 @@
               or $value = 'des' or $value = 'aes' or $value = 'rc4'
               or $value = 'db' or $value = 'snmp' or $value = 'qos'
               or $value = 'ecl' or $value = 'ttl' or $value = 'jpeg'
-              or $value = 'pbkdf2' or $value = 'pkcs5s2' or $value = 'pdb'
+              or $value = 'pbkdf2' or $value = 'pkcs5s2'
+              or $value = 'je' or $value = 'pdb'
              "/>
   </xsl:template>
 </xsl:stylesheet>
diff --git a/opendj-server-legacy/resource/schema/02-config.ldif b/opendj-server-legacy/resource/schema/02-config.ldif
index 06f22b2..fdca307 100644
--- a/opendj-server-legacy/resource/schema/02-config.ldif
+++ b/opendj-server-legacy/resource/schema/02-config.ldif
@@ -5787,4 +5787,33 @@
          ds-cfg-sort-order $
          ds-cfg-name )
   X-ORIGIN 'OpenDJ Directory Server' )
+objectClasses: ( 1.3.6.1.4.1.36733.2.1.2.26
+  NAME 'ds-cfg-je-backend'
+  SUP ds-cfg-pluggable-backend
+  STRUCTURAL
+  MUST ds-cfg-db-directory
+  MAY ( ds-cfg-db-directory-permissions $
+        ds-cfg-db-cache-percent $
+        ds-cfg-db-cache-size $
+        ds-cfg-db-cleaner-min-utilization $
+        ds-cfg-db-run-cleaner $
+        ds-cfg-db-evictor-lru-only $
+        ds-cfg-db-evictor-nodes-per-scan $
+        ds-cfg-db-evictor-core-threads $
+        ds-cfg-db-evictor-max-threads $
+        ds-cfg-db-evictor-keep-alive $
+        ds-cfg-db-log-file-max $
+        ds-cfg-db-log-filecache-size $
+        ds-cfg-db-logging-file-handler-on $
+        ds-cfg-db-logging-level $
+        ds-cfg-db-checkpointer-bytes-interval $
+        ds-cfg-db-checkpointer-wakeup-interval $
+        ds-cfg-db-num-lock-tables $
+        ds-cfg-db-num-cleaner-threads $
+        ds-cfg-db-txn-no-sync $
+        ds-cfg-db-txn-write-no-sync $
+        ds-cfg-disk-full-threshold $
+        ds-cfg-disk-low-threshold $
+        ds-cfg-je-property )
+  X-ORIGIN 'OpenDJ Directory Server' )
 
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/api/MonitorProvider.java b/opendj-server-legacy/src/main/java/org/opends/server/api/MonitorProvider.java
index a048f83..692eaa9 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/api/MonitorProvider.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/api/MonitorProvider.java
@@ -26,9 +26,12 @@
  */
 package org.opends.server.api;
 
-
 import java.util.List;
-import java.util.concurrent.*;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
 
 import org.forgerock.i18n.LocalizableMessage;
 import org.forgerock.opendj.config.server.ConfigException;
@@ -43,11 +46,9 @@
 /**
  * This class defines the set of methods and structures that must be
  * implemented by a Directory Server module that can provide usage,
- * performance, availability, or other kinds of monitor information
- * to clients.
+ * performance, availability, or other kinds of monitor information to clients.
  *
- * @param  <T>  The type of configuration handled by this monitor
- *              provider.
+ * @param  <T>  The type of configuration handled by this monitor provider.
  */
 @org.opends.server.types.PublicAPI(
      stability=org.opends.server.types.StabilityLevel.VOLATILE,
@@ -61,44 +62,35 @@
       Executors.newSingleThreadScheduledExecutor(
           new MonitorThreadFactory());
 
-  /**
-   * Thread factory used by the scheduled execution service.
-   */
-  private static final class MonitorThreadFactory implements
-      ThreadFactory
+  /** Thread factory used by the scheduled execution service. */
+  private static final class MonitorThreadFactory implements ThreadFactory
   {
-
-    /** {@inheritDoc} */
+    @Override
     public Thread newThread(Runnable r)
     {
-      Thread t =
-          new DirectoryThread(r, "Monitor Provider State Updater");
+      Thread t = new DirectoryThread(r, "Monitor Provider State Updater");
       t.setDaemon(true);
       return t;
     }
-
   }
 
   private ScheduledFuture<?> scheduledFuture;
 
   /**
-   * Initializes this monitor provider based on the information in the
-   * provided configuration entry.
+   * Initializes this monitor provider based on the information in the provided configuration entry.
    *
-   * @param  configuration  The configuration to use to initialize
-   *                        this monitor provider.
-   *
-   * @throws  ConfigException  If an unrecoverable problem arises in
-   *                           the process of performing the
-   *                           initialization.
-   *
-   * @throws  InitializationException  If a problem occurs during
-   *                                   initialization that is not
-   *                                   related to the server
-   *                                   configuration.
+   * @param configuration
+   *          The configuration to use to initialize this monitor provider.
+   * @throws ConfigException
+   *           If an unrecoverable problem arises in the process of performing the initialization.
+   * @throws InitializationException
+   *           If a problem occurs during initialization that is not related to the server
+   *           configuration.
    */
-  public abstract void initializeMonitorProvider(T configuration)
-         throws ConfigException, InitializationException;
+  public void initializeMonitorProvider(T configuration) throws ConfigException, InitializationException
+  {
+    // Default implementation is a no-op; subclasses override to perform initialization.
+  }
 
 
 
@@ -176,8 +168,7 @@
    */
   public ObjectClass getMonitorObjectClass()
   {
-    return DirectoryConfig.getObjectClass(OC_EXTENSIBLE_OBJECT_LC,
-                                          true);
+    return DirectoryConfig.getObjectClass(OC_EXTENSIBLE_OBJECT_LC, true);
   }
 
 
@@ -202,10 +193,7 @@
     {
       scheduledFuture.cancel(true);
     }
-
-    scheduledFuture =
-        SCHEDULER.scheduleAtFixedRate(updater, initialDelay,
-            period, unit);
+    scheduledFuture = SCHEDULER.scheduleAtFixedRate(updater, initialDelay, period, unit);
   }
 
 
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java
index 49774fa..9437445 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/ConfigurableEnvironment.java
@@ -35,27 +35,31 @@
 
 import org.forgerock.i18n.LocalizableMessage;
 import org.forgerock.i18n.slf4j.LocalizedLogger;
+import org.forgerock.opendj.config.server.ConfigException;
+import org.forgerock.opendj.ldap.ByteString;
 import org.opends.server.admin.BooleanPropertyDefinition;
 import org.opends.server.admin.DurationPropertyDefinition;
 import org.opends.server.admin.PropertyDefinition;
+import org.opends.server.admin.std.meta.JEBackendCfgDefn;
 import org.opends.server.admin.std.meta.LocalDBBackendCfgDefn;
+import org.opends.server.admin.std.server.BackendCfg;
+import org.opends.server.admin.std.server.JEBackendCfg;
 import org.opends.server.admin.std.server.LocalDBBackendCfg;
 import org.opends.server.config.ConfigConstants;
 import org.opends.server.core.DirectoryServer;
 import org.opends.server.core.MemoryQuota;
-import org.forgerock.opendj.config.server.ConfigException;
+import org.opends.server.types.DN;
+
 import com.sleepycat.je.Durability;
 import com.sleepycat.je.EnvironmentConfig;
 import com.sleepycat.je.dbi.MemoryBudget;
 
 import static com.sleepycat.je.EnvironmentConfig.*;
 
-import static org.opends.messages.ConfigMessages.*;
 import static org.opends.messages.BackendMessages.*;
+import static org.opends.messages.ConfigMessages.*;
 
-/**
- * This class maps JE properties to configuration attributes.
- */
+/** This class maps JE properties to configuration attributes. */
 public class ConfigurableEnvironment
 {
   private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
@@ -109,10 +113,7 @@
   public static final String ATTR_DATABASE_LOG_FILE_MAX =
        ConfigConstants.NAME_PREFIX_CFG + "db-log-file-max";
 
-  /**
-   * The name of the attribute which configures the database cache eviction
-   * algorithm.
-   */
+  /** The name of the attribute which configures the database cache eviction algorithm. */
   public static final String ATTR_EVICTOR_LRU_ONLY =
        ConfigConstants.NAME_PREFIX_CFG + "db-evictor-lru-only";
 
@@ -150,14 +151,10 @@
   public static final String ATTR_LOGGING_FILE_HANDLER_ON =
        ConfigConstants.NAME_PREFIX_CFG + "db-logging-file-handler-on";
 
-
-  /**
-   * The name of the attribute which configures the trace logging message level.
-   */
+  /** The name of the attribute which configures the trace logging message level. */
   public static final String ATTR_LOGGING_LEVEL =
        ConfigConstants.NAME_PREFIX_CFG + "db-logging-level";
 
-
   /**
    * The name of the attribute which configures how many bytes are written to
    * the log before the checkpointer runs.
@@ -165,7 +162,6 @@
   public static final String ATTR_CHECKPOINTER_BYTES_INTERVAL =
        ConfigConstants.NAME_PREFIX_CFG + "db-checkpointer-bytes-interval";
 
-
   /**
    * The name of the attribute which configures the amount of time between
    * runs of the checkpointer.
@@ -174,14 +170,10 @@
        ConfigConstants.NAME_PREFIX_CFG +
        "db-checkpointer-wakeup-interval";
 
-
-  /**
-   * The name of the attribute which configures the number of lock tables.
-   */
+  /** The name of the attribute which configures the number of lock tables. */
   public static final String ATTR_NUM_LOCK_TABLES =
        ConfigConstants.NAME_PREFIX_CFG + "db-num-lock-tables";
 
-
   /**
    * The name of the attribute which configures the number threads
    * allocated by the cleaner for log file processing.
@@ -189,36 +181,33 @@
   public static final String ATTR_NUM_CLEANER_THREADS =
        ConfigConstants.NAME_PREFIX_CFG + "db-num-cleaner-threads";
 
-  /**
-   * The name of the attribute which configures the size of the file
-   * handle cache.
-   */
+  /** The name of the attribute which configures the size of the file handle cache. */
   public static final String ATTR_LOG_FILECACHE_SIZE =
        ConfigConstants.NAME_PREFIX_CFG + "db-log-filecache-size";
 
-
-  /**
-   * The name of the attribute which may specify any native JE properties.
-   */
+  /** The name of the attribute which may specify any native JE properties. */
   public static final String ATTR_JE_PROPERTY =
        ConfigConstants.NAME_PREFIX_CFG + "je-property";
 
-
   /** A map of JE property names to the corresponding configuration attribute. */
   private static HashMap<String, String> attrMap = new HashMap<>();
   /**
-   * A map of configuration attribute names to the corresponding configuration
-   * object getter method.
+   * A map of configuration attribute names to the corresponding configuration object getter method.
    */
-  private static HashMap<String, Method> methodMap = new HashMap<>();
+  @RemoveOnceLocalDBBackendIsPluggable
+  private static Map<String, Method> localDbMethodMap = new HashMap<>();
+  /** A map of configuration attribute names to the corresponding configuration PropertyDefinition. */
+  @RemoveOnceLocalDBBackendIsPluggable
+  private static Map<String, PropertyDefinition<?>> localDbDefnMap = new HashMap<>();
+
   /**
-   * A map of configuration attribute names to the corresponding configuration
-   * PropertyDefinition.
+   * A map of configuration attribute names to the corresponding configuration object getter method.
    */
-  private static HashMap<String, PropertyDefinition> defnMap = new HashMap<>();
+  private static Map<String, Method> jebMethodMap = new HashMap<>();
+  /** A map of configuration attribute names to the corresponding configuration PropertyDefinition. */
+  private static Map<String, PropertyDefinition<?>> jebDefnMap = new HashMap<>();
 
-
-  /** Pulled from resource/admin/ABBREVIATIONS.xsl.  db is mose common. */
+  /** Pulled from resource/admin/ABBREVIATIONS.xsl. db is most common. */
   private static final List<String> ABBREVIATIONS = Arrays.asList(new String[]
           {"aci", "ip", "ssl", "dn", "rdn", "jmx", "smtp", "http",
            "https", "ldap", "ldaps", "ldif", "jdbc", "tcp", "tls",
@@ -241,7 +230,6 @@
     return buffer.toString();
   }
 
-
   /**
    * Register a JE property and its corresponding configuration attribute.
    *
@@ -255,32 +243,45 @@
   {
     // Strip off NAME_PREFIX_CFG.
     String baseName = attrName.substring(7);
-
     String methodBaseName = propNametoCamlCase(baseName);
 
+    registerLocalDbProp(attrName, methodBaseName);
+    registerJebProp(attrName, methodBaseName);
+    attrMap.put(propertyName, attrName);
+  }
+
+  @RemoveOnceLocalDBBackendIsPluggable
+  private static void registerLocalDbProp(String attrName, String methodBaseName) throws Exception
+  {
     Class<LocalDBBackendCfg> configClass = LocalDBBackendCfg.class;
     LocalDBBackendCfgDefn defn = LocalDBBackendCfgDefn.getInstance();
     Class<? extends LocalDBBackendCfgDefn> defClass = defn.getClass();
 
-    PropertyDefinition propDefn =
-         (PropertyDefinition)defClass.getMethod("get" + methodBaseName +
-         "PropertyDefinition").invoke(defn);
+    String propName = "get" + methodBaseName + "PropertyDefinition";
+    PropertyDefinition<?> propDefn = (PropertyDefinition<?>) defClass.getMethod(propName).invoke(defn);
 
-    String methodName;
-    if (propDefn instanceof BooleanPropertyDefinition)
-    {
-      methodName = "is" + methodBaseName;
-    }
-    else
-    {
-      methodName = "get" + methodBaseName;
-    }
+    String methodPrefix = propDefn instanceof BooleanPropertyDefinition ? "is" : "get";
+    String methodName = methodPrefix + methodBaseName;
 
-    defnMap.put(attrName, propDefn);
-    methodMap.put(attrName, configClass.getMethod(methodName));
-    attrMap.put(propertyName, attrName);
+    localDbDefnMap.put(attrName, propDefn);
+    localDbMethodMap.put(attrName, configClass.getMethod(methodName));
   }
 
+  private static void registerJebProp(String attrName, String methodBaseName) throws Exception
+  {
+    Class<JEBackendCfg> configClass = JEBackendCfg.class;
+    JEBackendCfgDefn defn = JEBackendCfgDefn.getInstance();
+    Class<? extends JEBackendCfgDefn> defClass = defn.getClass();
+
+    String propName = "get" + methodBaseName + "PropertyDefinition";
+    PropertyDefinition<?> propDefn = (PropertyDefinition<?>) defClass.getMethod(propName).invoke(defn);
+
+    String methodPrefix = propDefn instanceof BooleanPropertyDefinition ? "is" : "get";
+    String methodName = methodPrefix + methodBaseName;
+
+    jebDefnMap.put(attrName, propDefn);
+    jebMethodMap.put(attrName, configClass.getMethod(methodName));
+  }
 
   /**
    * Get the name of the configuration attribute associated with a JE property.
@@ -295,15 +296,16 @@
   /**
    * Get the value of a JE property that is mapped to a configuration attribute.
    * @param cfg The configuration containing the property values.
-   * @param attrName The conriguration attribute type name.
+   * @param attrName The configuration attribute type name.
    * @return The string value of the JE property.
    */
-  private static String getPropertyValue(LocalDBBackendCfg cfg, String attrName)
+  private static String getPropertyValue(BackendCfg cfg, String attrName, ByteString backendId)
   {
     try
     {
-      PropertyDefinition propDefn = defnMap.get(attrName);
-      Method method = methodMap.get(attrName);
+      final boolean isLocalDb = cfg instanceof LocalDBBackendCfg;
+      PropertyDefinition<?> propDefn = (isLocalDb ? localDbDefnMap : jebDefnMap).get(attrName);
+      Method method = (isLocalDb ? localDbMethodMap : jebMethodMap).get(attrName);
 
       if (propDefn instanceof DurationPropertyDefinition)
       {
@@ -329,7 +331,7 @@
           value = Integer.valueOf(Math.max(24, cpus * 2));
 
           logger.debug(INFO_ERGONOMIC_SIZING_OF_JE_CLEANER_THREADS,
-              cfg.dn().rdn().getAttributeValue(0), (Number) value);
+              backendId, (Number) value);
         }
         else if (attrName.equals(ATTR_NUM_LOCK_TABLES)
             && value == null)
@@ -343,7 +345,7 @@
           BigInteger tmp = BigInteger.valueOf((cleaners + workers) * 2);
           value = tmp.nextProbablePrime();
 
-          logger.debug(INFO_ERGONOMIC_SIZING_OF_JE_LOCK_TABLES, cfg.dn().rdn().getAttributeValue(0), (Number) value);
+          logger.debug(INFO_ERGONOMIC_SIZING_OF_JE_LOCK_TABLES, backendId, (Number) value);
         }
 
         return String.valueOf(value);
@@ -356,8 +358,6 @@
     }
   }
 
-
-
   static
   {
     // Register the parameters that have JE property names.
@@ -387,8 +387,6 @@
     }
   }
 
-
-
   /**
    * Create a JE environment configuration with default values.
    *
@@ -435,7 +433,28 @@
     return envConfig;
   }
 
+  /**
+   * Parse a configuration associated with a JE environment and create an
+   * environment config from it.
+   *
+   * @param cfg The configuration to be parsed.
+   * @return An environment config instance corresponding to the config entry.
+   * @throws ConfigException If there is an error in the provided configuration
+   * entry.
+   */
+  public static EnvironmentConfig parseConfigEntry(JEBackendCfg cfg) throws ConfigException
+  {
+    validateDbCacheSize(cfg.getDBCacheSize());
 
+    EnvironmentConfig envConfig = defaultConfig();
+    setDurability(envConfig, cfg.isDBTxnNoSync(), cfg.isDBTxnWriteNoSync());
+    setJEProperties(cfg, envConfig, cfg.dn().rdn().getAttributeValue(0));
+    setDBLoggingLevel(envConfig, cfg.getDBLoggingLevel(), cfg.dn(), cfg.isDBLoggingFileHandlerOn());
+
+    // See if there are any native JE properties specified in the config
+    // and if so try to parse, evaluate and set them.
+    return setJEProperties(envConfig, cfg.getJEProperty(), attrMap);
+  }
 
   /**
    * Parse a configuration associated with a JE environment and create an
@@ -446,77 +465,89 @@
    * @throws ConfigException If there is an error in the provided configuration
    * entry.
    */
-  public static EnvironmentConfig parseConfigEntry(LocalDBBackendCfg cfg)
-       throws ConfigException
+  @RemoveOnceLocalDBBackendIsPluggable
+  public static EnvironmentConfig parseConfigEntry(LocalDBBackendCfg cfg) throws ConfigException
   {
-    // See if the db cache size setting is valid.
-    if(cfg.getDBCacheSize() != 0)
-    {
-      if (MemoryBudget.getRuntimeMaxMemory() < cfg.getDBCacheSize()) {
-        throw new ConfigException(
-            ERR_BACKEND_CONFIG_CACHE_SIZE_GREATER_THAN_JVM_HEAP.get(
-                cfg.getDBCacheSize(), MemoryBudget.getRuntimeMaxMemory()));
-      }
-      if (cfg.getDBCacheSize() < MemoryBudget.MIN_MAX_MEMORY_SIZE) {
-        throw new ConfigException(
-            ERR_CONFIG_JEB_CACHE_SIZE_TOO_SMALL.get(
-                cfg.getDBCacheSize(), MemoryBudget.MIN_MAX_MEMORY_SIZE));
-      }
-      MemoryQuota memoryQuota = DirectoryServer.getInstance().getServerContext().getMemoryQuota();
-      if (!memoryQuota.acquireMemory(cfg.getDBCacheSize()))
-      {
-        logger.warn(ERR_BACKEND_CONFIG_CACHE_SIZE_GREATER_THAN_JVM_HEAP.get(
-            cfg.getDBCacheSize(), memoryQuota.getMaxMemory()));
-      }
-    }
+    validateDbCacheSize(cfg.getDBCacheSize());
 
     EnvironmentConfig envConfig = defaultConfig();
-
-    // Durability settings.
-    if (cfg.isDBTxnNoSync() && cfg.isDBTxnWriteNoSync())
-    {
-      throw new ConfigException(
-              ERR_CONFIG_JEB_DURABILITY_CONFLICT.get());
-    }
-    if (cfg.isDBTxnNoSync())
-    {
-      envConfig.setDurability(Durability.COMMIT_NO_SYNC);
-    }
-    if (cfg.isDBTxnWriteNoSync())
-    {
-      envConfig.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
-    }
-
-    // Iterate through the config attributes associated with a JE property.
-    for (Map.Entry<String, String> mapEntry : attrMap.entrySet())
-    {
-      String jeProperty = mapEntry.getKey();
-      String attrName = mapEntry.getValue();
-
-      String value = getPropertyValue(cfg, attrName);
-      envConfig.setConfigParam(jeProperty, value);
-    }
-
-    // Set logging and file handler levels.
-    Logger parent = Logger.getLogger("com.sleepycat.je");
-    try
-    {
-      parent.setLevel(Level.parse(cfg.getDBLoggingLevel()));
-    }
-    catch (Exception e)
-    {
-      throw new ConfigException(ERR_JEB_INVALID_LOGGING_LEVEL.get(cfg.getDBLoggingLevel(), cfg.dn()));
-    }
-
-    final Level level = cfg.isDBLoggingFileHandlerOn() ? Level.ALL : Level.OFF;
-    envConfig.setConfigParam(FILE_LOGGING_LEVEL, level.getName());
+    setDurability(envConfig, cfg.isDBTxnNoSync(), cfg.isDBTxnWriteNoSync());
+    setJEProperties(cfg, envConfig, cfg.dn().rdn().getAttributeValue(0));
+    setDBLoggingLevel(envConfig, cfg.getDBLoggingLevel(), cfg.dn(), cfg.isDBLoggingFileHandlerOn());
 
     // See if there are any native JE properties specified in the config
     // and if so try to parse, evaluate and set them.
     return setJEProperties(envConfig, cfg.getJEProperty(), attrMap);
   }
 
+  private static void validateDbCacheSize(long dbCacheSize) throws ConfigException
+  {
+    if (dbCacheSize != 0)
+    {
+      if (MemoryBudget.getRuntimeMaxMemory() < dbCacheSize)
+      {
+        throw new ConfigException(ERR_BACKEND_CONFIG_CACHE_SIZE_GREATER_THAN_JVM_HEAP.get(
+            dbCacheSize, MemoryBudget.getRuntimeMaxMemory()));
+      }
+      if (dbCacheSize < MemoryBudget.MIN_MAX_MEMORY_SIZE)
+      {
+        throw new ConfigException(ERR_CONFIG_JEB_CACHE_SIZE_TOO_SMALL.get(
+            dbCacheSize, MemoryBudget.MIN_MAX_MEMORY_SIZE));
+      }
+      MemoryQuota memoryQuota = DirectoryServer.getInstance().getServerContext().getMemoryQuota();
+      if (!memoryQuota.acquireMemory(dbCacheSize))
+      {
+        logger.warn(ERR_BACKEND_CONFIG_CACHE_SIZE_GREATER_THAN_JVM_HEAP.get(
+            dbCacheSize, memoryQuota.getMaxMemory()));
+      }
+    }
+  }
 
+  private static void setDurability(EnvironmentConfig envConfig, boolean dbTxnNoSync, boolean dbTxnWriteNoSync)
+      throws ConfigException
+  {
+    if (dbTxnNoSync && dbTxnWriteNoSync)
+    {
+      throw new ConfigException(ERR_CONFIG_JEB_DURABILITY_CONFLICT.get());
+    }
+    if (dbTxnNoSync)
+    {
+      envConfig.setDurability(Durability.COMMIT_NO_SYNC);
+    }
+    else if (dbTxnWriteNoSync)
+    {
+      envConfig.setDurability(Durability.COMMIT_WRITE_NO_SYNC);
+    }
+  }
+
+  private static void setJEProperties(BackendCfg cfg, EnvironmentConfig envConfig, ByteString backendId)
+  {
+    for (Map.Entry<String, String> mapEntry : attrMap.entrySet())
+    {
+      String jeProperty = mapEntry.getKey();
+      String attrName = mapEntry.getValue();
+
+      String value = getPropertyValue(cfg, attrName, backendId);
+      envConfig.setConfigParam(jeProperty, value);
+    }
+  }
+
+  private static void setDBLoggingLevel(EnvironmentConfig envConfig, String loggingLevel, DN dn,
+      boolean loggingFileHandlerOn) throws ConfigException
+  {
+    Logger parent = Logger.getLogger("com.sleepycat.je");
+    try
+    {
+      parent.setLevel(Level.parse(loggingLevel));
+    }
+    catch (Exception e)
+    {
+      throw new ConfigException(ERR_JEB_INVALID_LOGGING_LEVEL.get(loggingLevel, dn));
+    }
+
+    final Level level = loggingFileHandlerOn ? Level.ALL : Level.OFF;
+    envConfig.setConfigParam(FILE_LOGGING_LEVEL, level.getName());
+  }
 
   /**
    * Parse, validate and set native JE environment properties for
@@ -549,43 +580,36 @@
     for (String jeEntry : jeProperties)
     {
       StringTokenizer st = new StringTokenizer(jeEntry, "=");
-      if (st.countTokens() == 2) {
-        String jePropertyName = st.nextToken();
-        String jePropertyValue = st.nextToken();
-        // Check if it is a duplicate.
-        if (uniqueJEProperties.contains(jePropertyName)) {
-          LocalizableMessage message = ERR_CONFIG_JE_DUPLICATE_PROPERTY.get(
-              jePropertyName);
-            throw new ConfigException(message);
+      if (st.countTokens() != 2)
+      {
+        throw new ConfigException(ERR_CONFIG_JE_PROPERTY_INVALID_FORM.get(jeEntry));
+      }
+
+      String jePropertyName = st.nextToken();
+      String jePropertyValue = st.nextToken();
+      // Check if it is a duplicate.
+      if (uniqueJEProperties.contains(jePropertyName)) {
+        throw new ConfigException(ERR_CONFIG_JE_DUPLICATE_PROPERTY.get(jePropertyName));
+      }
+
+      // Set JE property.
+      try {
+        envConfig.setConfigParam(jePropertyName, jePropertyValue);
+        // If this property shadows an existing config attribute.
+        if (configAttrMap.containsKey(jePropertyName)) {
+          LocalizableMessage message = ERR_CONFIG_JE_PROPERTY_SHADOWS_CONFIG.get(
+            jePropertyName, attrMap.get(jePropertyName));
+          throw new ConfigException(message);
         }
-        // Set JE property.
-        try {
-          envConfig.setConfigParam(jePropertyName, jePropertyValue);
-          // If this property shadows an existing config attribute.
-          if (configAttrMap.containsKey(jePropertyName)) {
-            LocalizableMessage message = ERR_CONFIG_JE_PROPERTY_SHADOWS_CONFIG.get(
-              jePropertyName, attrMap.get(jePropertyName));
-            throw new ConfigException(message);
-          }
-          // Add this property to unique set.
-          uniqueJEProperties.add(jePropertyName);
-        } catch(IllegalArgumentException e) {
-          logger.traceException(e);
-          LocalizableMessage message =
-            ERR_CONFIG_JE_PROPERTY_INVALID.get(
-            jeEntry, e.getMessage());
-          throw new ConfigException(message, e.getCause());
-        }
-      } else {
-        LocalizableMessage message =
-          ERR_CONFIG_JE_PROPERTY_INVALID_FORM.get(jeEntry);
-        throw new ConfigException(message);
+        // Add this property to unique set.
+        uniqueJEProperties.add(jePropertyName);
+      } catch(IllegalArgumentException e) {
+        logger.traceException(e);
+        LocalizableMessage message = ERR_CONFIG_JE_PROPERTY_INVALID.get(jeEntry, e.getMessage());
+        throw new ConfigException(message, e.getCause());
       }
     }
 
     return envConfig;
   }
-
-
-
 }
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEBackend.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEBackend.java
new file mode 100644
index 0000000..530ef09
--- /dev/null
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEBackend.java
@@ -0,0 +1,52 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
+ * or http://forgerock.org/license/CDDLv1.0.html.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at legal-notices/CDDLv1_0.txt.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2015 ForgeRock AS
+ */
+package org.opends.server.backends.jeb;
+
+import java.util.List;
+
+import org.forgerock.i18n.LocalizableMessage;
+import org.forgerock.opendj.config.server.ConfigException;
+import org.opends.server.admin.std.server.JEBackendCfg;
+import org.opends.server.backends.pluggable.BackendImpl;
+import org.opends.server.backends.pluggable.spi.Storage;
+import org.opends.server.core.ServerContext;
+
+/** Class defined in the configuration for this backend type. */
+public final class JEBackend extends BackendImpl<JEBackendCfg>
+{
+  @Override
+  public boolean isConfigurationAcceptable(JEBackendCfg cfg, List<LocalizableMessage> unacceptableReasons,
+      ServerContext serverContext)
+  {
+    return JEStorage.isConfigurationAcceptable(cfg, unacceptableReasons, serverContext);
+  }
+
+  @Override
+  protected Storage configureStorage(JEBackendCfg cfg, ServerContext serverContext) throws ConfigException
+  {
+    return new JEStorage(cfg, serverContext);
+  }
+}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEMonitor.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEMonitor.java
new file mode 100644
index 0000000..1c3c568
--- /dev/null
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEMonitor.java
@@ -0,0 +1,122 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
+ * or http://forgerock.org/license/CDDLv1.0.html.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at legal-notices/CDDLv1_0.txt.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2006-2010 Sun Microsystems, Inc.
+ *      Portions Copyright 2014-2015 ForgeRock AS
+ */
+package org.opends.server.backends.jeb;
+
+import static org.opends.server.util.StaticUtils.*;
+
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.forgerock.i18n.slf4j.LocalizedLogger;
+import org.forgerock.opendj.ldap.schema.Syntax;
+import org.opends.server.admin.std.server.MonitorProviderCfg;
+import org.opends.server.api.MonitorProvider;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.types.Attribute;
+import org.opends.server.types.AttributeType;
+import org.opends.server.types.Attributes;
+
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.JEVersion;
+import com.sleepycat.je.StatsConfig;
+
+/** Monitoring class for JE, populating cn=monitor statistics using reflection on the stats objects' methods. */
+final class JEMonitor extends MonitorProvider<MonitorProviderCfg>
+{
+  private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
+
+  /** The name of this monitor instance. */
+  private final String name;
+  /** The environment to be monitored. */
+  private final Environment env;
+
+  JEMonitor(String name, Environment env)
+  {
+    this.name = name;
+    this.env = env;
+  }
+
+  @Override
+  public String getMonitorInstanceName()
+  {
+    return name;
+  }
+
+  @Override
+  public List<Attribute> getMonitorData()
+  {
+    try
+    {
+      List<Attribute> monitorAttrs = new ArrayList<>();
+
+      monitorAttrs.add(Attributes.create("JEVersion", JEVersion.CURRENT_VERSION.getVersionString()));
+
+      StatsConfig statsConfig = new StatsConfig();
+      addAttributesForStatsObject(monitorAttrs, "Environment", env.getStats(statsConfig));
+      addAttributesForStatsObject(monitorAttrs, "Transaction", env.getTransactionStats(statsConfig));
+
+      return monitorAttrs;
+    }
+    catch (Exception e)
+    {
+      logger.traceException(e);
+      return Collections.singletonList(Attributes.create("JEInfo", stackTraceToSingleLineString(e)));
+    }
+  }
+
+  private void addAttributesForStatsObject(List<Attribute> monitorAttrs, String attrPrefix, Object stats)
+  {
+    for (Method method : stats.getClass().getMethods())
+    {
+      final Class<?> returnType = method.getReturnType();
+      if (method.getName().startsWith("get")
+          && (returnType.equals(int.class) || returnType.equals(long.class)))
+      {
+        addStatAttribute(monitorAttrs, attrPrefix, stats, method);
+      }
+    }
+  }
+
+  private void addStatAttribute(List<Attribute> monitorAttrs, String attrPrefix, Object stats, Method method)
+  {
+    final Syntax integerSyntax = DirectoryServer.getDefaultIntegerSyntax();
+    try
+    {
+      // Remove the 'get' from the method name and add the prefix.
+      String attrName = attrPrefix + method.getName().substring(3);
+
+      AttributeType attrType = DirectoryServer.getDefaultAttributeType(attrName, integerSyntax);
+      monitorAttrs.add(Attributes.create(attrType, String.valueOf(method.invoke(stats))));
+    }
+    catch (Exception e)
+    {
+      logger.traceException(e);
+    }
+  }
+}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEStorage.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEStorage.java
new file mode 100644
index 0000000..c951c66
--- /dev/null
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/jeb/JEStorage.java
@@ -0,0 +1,1375 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License, Version 1.0 only
+ * (the "License").  You may not use this file except in compliance
+ * with the License.
+ *
+ * You can obtain a copy of the license at legal-notices/CDDLv1_0.txt
+ * or http://forgerock.org/license/CDDLv1.0.html.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at legal-notices/CDDLv1_0.txt.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information:
+ *      Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ *
+ *
+ *      Copyright 2015 ForgeRock AS
+ */
+package org.opends.server.backends.jeb;
+
+import static com.sleepycat.je.EnvironmentConfig.*;
+import static com.sleepycat.je.OperationStatus.*;
+
+import static org.forgerock.util.Utils.*;
+import static org.opends.messages.BackendMessages.*;
+import static org.opends.messages.ConfigMessages.*;
+import static org.opends.messages.UtilityMessages.*;
+import static org.opends.server.util.StaticUtils.*;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
+import org.forgerock.i18n.LocalizableMessage;
+import org.forgerock.i18n.slf4j.LocalizedLogger;
+import org.forgerock.opendj.config.server.ConfigChangeResult;
+import org.forgerock.opendj.config.server.ConfigException;
+import org.forgerock.opendj.ldap.ByteSequence;
+import org.forgerock.opendj.ldap.ByteString;
+import org.forgerock.util.Reject;
+import org.opends.server.admin.server.ConfigurationChangeListener;
+import org.opends.server.admin.std.server.JEBackendCfg;
+import org.opends.server.api.Backupable;
+import org.opends.server.api.DiskSpaceMonitorHandler;
+import org.opends.server.backends.pluggable.spi.AccessMode;
+import org.opends.server.backends.pluggable.spi.Cursor;
+import org.opends.server.backends.pluggable.spi.Importer;
+import org.opends.server.backends.pluggable.spi.ReadOnlyStorageException;
+import org.opends.server.backends.pluggable.spi.ReadOperation;
+import org.opends.server.backends.pluggable.spi.SequentialCursor;
+import org.opends.server.backends.pluggable.spi.Storage;
+import org.opends.server.backends.pluggable.spi.StorageRuntimeException;
+import org.opends.server.backends.pluggable.spi.StorageStatus;
+import org.opends.server.backends.pluggable.spi.TreeName;
+import org.opends.server.backends.pluggable.spi.UpdateFunction;
+import org.opends.server.backends.pluggable.spi.WriteOperation;
+import org.opends.server.backends.pluggable.spi.WriteableTransaction;
+import org.opends.server.core.DirectoryServer;
+import org.opends.server.core.MemoryQuota;
+import org.opends.server.core.ServerContext;
+import org.opends.server.extensions.DiskSpaceMonitor;
+import org.opends.server.types.BackupConfig;
+import org.opends.server.types.BackupDirectory;
+import org.opends.server.types.DirectoryException;
+import org.opends.server.types.FilePermission;
+import org.opends.server.types.RestoreConfig;
+import org.opends.server.util.BackupManager;
+
+import com.sleepycat.je.CursorConfig;
+import com.sleepycat.je.Database;
+import com.sleepycat.je.DatabaseConfig;
+import com.sleepycat.je.DatabaseEntry;
+import com.sleepycat.je.DatabaseException;
+import com.sleepycat.je.DatabaseNotFoundException;
+import com.sleepycat.je.Durability;
+import com.sleepycat.je.Environment;
+import com.sleepycat.je.EnvironmentConfig;
+import com.sleepycat.je.LockMode;
+import com.sleepycat.je.OperationStatus;
+import com.sleepycat.je.Transaction;
+import com.sleepycat.je.TransactionConfig;
+
+/** Berkeley DB Java Edition (JE for short) database implementation of the {@link Storage} engine. */
+public final class JEStorage implements Storage, Backupable, ConfigurationChangeListener<JEBackendCfg>,
+    DiskSpaceMonitorHandler
+{
+  /** JE implementation of the {@link Cursor} interface. */
+  private static final class CursorImpl implements Cursor<ByteString, ByteString>
+  {
+    /** Lazily decoded copy of {@link #dbKey}; reset on every cursor move. */
+    private ByteString currentKey;
+    /** Lazily decoded copy of {@link #dbValue}; reset on every cursor move. */
+    private ByteString currentValue;
+    /** Whether the last positioning operation landed on a record. */
+    private boolean isDefined;
+    /** The underlying JE cursor being wrapped. */
+    private final com.sleepycat.je.Cursor cursor;
+    /** Reusable JE entries; JE fills them with the current record's key/value. */
+    private final DatabaseEntry dbKey = new DatabaseEntry();
+    private final DatabaseEntry dbValue = new DatabaseEntry();
+
+    private CursorImpl(com.sleepycat.je.Cursor cursor)
+    {
+      this.cursor = cursor;
+    }
+
+    @Override
+    public void close()
+    {
+      closeSilently(cursor);
+    }
+
+    @Override
+    public boolean isDefined()
+    {
+      return isDefined;
+    }
+
+    /**
+     * Returns the key at the current position, decoding it at most once per move.
+     *
+     * @throws NoSuchElementException if the cursor is not positioned on a record
+     */
+    @Override
+    public ByteString getKey()
+    {
+      if (currentKey == null)
+      {
+        throwIfNotSuccess();
+        currentKey = ByteString.wrap(dbKey.getData());
+      }
+      return currentKey;
+    }
+
+    /**
+     * Returns the value at the current position, decoding it at most once per move.
+     *
+     * @throws NoSuchElementException if the cursor is not positioned on a record
+     */
+    @Override
+    public ByteString getValue()
+    {
+      if (currentValue == null)
+      {
+        throwIfNotSuccess();
+        currentValue = ByteString.wrap(dbValue.getData());
+      }
+      return currentValue;
+    }
+
+    /** Moves to the next record; returns whether such a record exists. */
+    @Override
+    public boolean next()
+    {
+      clearCurrentKeyAndValue();
+      try
+      {
+        isDefined = cursor.getNext(dbKey, dbValue, null) == SUCCESS;
+        return isDefined;
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /** Positions the cursor on the exact key; returns whether the key exists. */
+    @Override
+    public boolean positionToKey(final ByteSequence key)
+    {
+      clearCurrentKeyAndValue();
+      setData(dbKey, key);
+      try
+      {
+        isDefined = cursor.getSearchKey(dbKey, dbValue, null) == SUCCESS;
+        return isDefined;
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /** Positions the cursor on the given key, or the next greater key if absent. */
+    @Override
+    public boolean positionToKeyOrNext(final ByteSequence key)
+    {
+      clearCurrentKeyAndValue();
+      setData(dbKey, key);
+      try
+      {
+        isDefined = cursor.getSearchKeyRange(dbKey, dbValue, null) == SUCCESS;
+        return isDefined;
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /**
+     * Positions the cursor on the record at the given zero-based index by moving to
+     * the first record and skipping forward. The skip succeeds only if exactly
+     * {@code index} records could be skipped.
+     */
+    @Override
+    public boolean positionToIndex(int index)
+    {
+      clearCurrentKeyAndValue();
+      try
+      {
+        isDefined = cursor.getFirst(dbKey, dbValue, null) == SUCCESS;
+        if (!isDefined)
+        {
+          return false;
+        }
+
+        // equivalent to READ_UNCOMMITTED
+        long skipped = cursor.skipNext(index, dbKey, dbValue, null);
+        if (skipped == index)
+        {
+          // Re-read the record under the cursor after the skip.
+          isDefined = cursor.getCurrent(dbKey, dbValue, null) == SUCCESS;
+        }
+        else
+        {
+          // Fewer records than requested: the index is out of range.
+          isDefined = false;
+        }
+        return isDefined;
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /** Positions the cursor on the last record; returns whether the tree is non-empty. */
+    @Override
+    public boolean positionToLastKey()
+    {
+      clearCurrentKeyAndValue();
+      try
+      {
+        isDefined = cursor.getLast(dbKey, dbValue, null) == SUCCESS;
+        return isDefined;
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /** Invalidates the cached decoded key/value after any cursor movement. */
+    private void clearCurrentKeyAndValue()
+    {
+      currentKey = null;
+      currentValue = null;
+    }
+
+    private void throwIfNotSuccess()
+    {
+      if (!isDefined())
+      {
+        throw new NoSuchElementException();
+      }
+    }
+  }
+
+  /**
+   * JE implementation of the {@link Importer} interface.
+   * <p>
+   * All operations run without a transaction ({@code null} txn): the environment is
+   * opened non-transactional for import (see {@code buildConfiguration()}). Opened
+   * databases are cached per importer instance.
+   */
+  private final class ImporterImpl implements Importer
+  {
+    /** Importer-local cache of opened JE databases, keyed by tree name. */
+    private final Map<TreeName, Database> trees = new HashMap<>();
+
+    private Database getOrOpenTree(TreeName treeName)
+    {
+      return getOrOpenTree0(null, trees, treeName);
+    }
+
+    @Override
+    public void put(final TreeName treeName, final ByteSequence key, final ByteSequence value)
+    {
+      try
+      {
+        getOrOpenTree(treeName).put(null, db(key), db(value));
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /** Reads the value for the given key, or {@code null} when the key is absent. */
+    @Override
+    public ByteString read(final TreeName treeName, final ByteSequence key)
+    {
+      try
+      {
+        DatabaseEntry dbValue = new DatabaseEntry();
+        boolean isDefined = getOrOpenTree(treeName).get(null, db(key), dbValue, null) == SUCCESS;
+        return valueToBytes(dbValue, isDefined);
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    @Override
+    public SequentialCursor<ByteString, ByteString> openCursor(TreeName treeName)
+    {
+      try
+      {
+        return new CursorImpl(getOrOpenTree(treeName).openCursor(null, new CursorConfig()));
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /** Removes all records from the tree without deleting the tree itself. */
+    @Override
+    public void clearTree(TreeName treeName)
+    {
+      env.truncateDatabase(null, mangleTreeName(treeName), false);
+    }
+
+    /** Closes all cached databases, then closes the whole enclosing storage. */
+    @Override
+    public void close()
+    {
+      closeSilently(trees.values());
+      trees.clear();
+      JEStorage.this.close();
+    }
+  }
+
+  /**
+   * JE implementation of the {@link WriteableTransaction} interface. All operations run
+   * against the single JE {@link Transaction} supplied at construction (which may be
+   * {@code null} when the environment is non-transactional).
+   */
+  private final class WriteableTransactionImpl implements WriteableTransaction
+  {
+    /** The JE transaction all operations participate in; may be {@code null} during import. */
+    private final Transaction txn;
+
+    private WriteableTransactionImpl(Transaction txn)
+    {
+      this.txn = txn;
+    }
+
+    /**
+     * This is currently needed for import-ldif:
+     * <ol>
+     * <li>Opening the EntryContainer calls {@link #openTree(TreeName, boolean)} for each index</li>
+     * <li>Then the underlying storage is closed</li>
+     * <li>Then {@link Importer#startImport()} is called</li>
+     * <li>Then ID2Entry#put() is called</li>
+     * <li>Which in turn calls ID2Entry#encodeEntry()</li>
+     * <li>Which in turn finally calls PersistentCompressedSchema#store()</li>
+     * <li>Which uses a reference to the storage (that was closed before calling startImport()) and
+     * uses it as if it was open</li>
+     * </ol>
+     */
+    private Database getOrOpenTree(TreeName treeName)
+    {
+      try
+      {
+        return getOrOpenTree0(txn, trees, treeName);
+      }
+      catch (Exception e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /**
+     * Inserts or replaces the value for the given key.
+     *
+     * @throws StorageRuntimeException if JE reports anything other than SUCCESS
+     */
+    @Override
+    public void put(final TreeName treeName, final ByteSequence key, final ByteSequence value)
+    {
+      try
+      {
+        final OperationStatus status = getOrOpenTree(treeName).put(txn, db(key), db(value));
+        if (status != SUCCESS)
+        {
+          throw new StorageRuntimeException(putErrorMsg(treeName, key, value, "did not succeed: " + status));
+        }
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(putErrorMsg(treeName, key, value, "threw an exception"), e);
+      }
+    }
+
+    /** Builds a diagnostic message for a failed put, including tree, key and value. */
+    private String putErrorMsg(TreeName treeName, ByteSequence key, ByteSequence value, String msg)
+    {
+      return "put(treeName=" + treeName + ", key=" + key + ", value=" + value + ") " + msg;
+    }
+
+    /** Deletes the record for the given key; returns whether a record was deleted. */
+    @Override
+    public boolean delete(final TreeName treeName, final ByteSequence key)
+    {
+      try
+      {
+        return getOrOpenTree(treeName).delete(txn, db(key)) == SUCCESS;
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(deleteErrorMsg(treeName, key, "threw an exception"), e);
+      }
+    }
+
+    /** Builds a diagnostic message for a failed delete, including tree and key. */
+    private String deleteErrorMsg(TreeName treeName, ByteSequence key, String msg)
+    {
+      return "delete(treeName=" + treeName + ", key=" + key + ") " + msg;
+    }
+
+    @Override
+    public long getRecordCount(TreeName treeName)
+    {
+      try
+      {
+        return getOrOpenTree(treeName).count();
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    @Override
+    public Cursor<ByteString, ByteString> openCursor(final TreeName treeName)
+    {
+      try
+      {
+        return new CursorImpl(getOrOpenTree(treeName).openCursor(txn, CursorConfig.READ_COMMITTED));
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /** Reads the value for the given key, or {@code null} when the key is absent. */
+    @Override
+    public ByteString read(final TreeName treeName, final ByteSequence key)
+    {
+      try
+      {
+        DatabaseEntry dbValue = new DatabaseEntry();
+        boolean isDefined = getOrOpenTree(treeName).get(txn, db(key), dbValue, null) == SUCCESS;
+        return valueToBytes(dbValue, isDefined);
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /**
+     * Atomically rewrites the value for a key: reads it under a write lock (RMW),
+     * applies {@code f}, then writes the new value, deletes the record when the new
+     * value is {@code null}, or does nothing when the value is unchanged.
+     *
+     * @return {@code true} if the tree was modified
+     */
+    @Override
+    public boolean update(final TreeName treeName, final ByteSequence key, final UpdateFunction f)
+    {
+      try
+      {
+        Database tree = getOrOpenTree(treeName);
+        DatabaseEntry dbKey = db(key);
+        DatabaseEntry dbValue = new DatabaseEntry();
+
+        // RMW takes a write lock up-front so the read-modify-write is not interleaved.
+        boolean isDefined = tree.get(txn, dbKey, dbValue, LockMode.RMW) == SUCCESS;
+        final ByteSequence oldValue = valueToBytes(dbValue, isDefined);
+        final ByteSequence newValue = f.computeNewValue(oldValue);
+        if (!Objects.equals(newValue, oldValue))
+        {
+          if (newValue == null)
+          {
+            return tree.delete(txn, dbKey) == SUCCESS;
+          }
+
+          setData(dbValue, newValue);
+          return tree.put(txn, dbKey, dbValue) == SUCCESS;
+        }
+        return false;
+      }
+      catch (DatabaseException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+
+    /** Ensures the tree is open (and created if needed) by fetching it into the cache. */
+    @Override
+    public void openTree(final TreeName treeName, boolean createOnDemand)
+    {
+      getOrOpenTree(treeName);
+    }
+
+    /** Removes the tree from the cache and deletes the underlying JE database. */
+    @Override
+    public void deleteTree(final TreeName treeName)
+    {
+      try
+      {
+        synchronized (trees)
+        {
+          trees.remove(treeName);
+          env.removeDatabase(txn, mangleTreeName(treeName));
+        }
+      }
+      catch (DatabaseNotFoundException e)
+      {
+        throw new StorageRuntimeException(e);
+      }
+    }
+  }
+
+  /**
+   * Read-only view over a {@link WriteableTransactionImpl}: read operations are
+   * delegated; every mutating operation throws {@link ReadOnlyStorageException}.
+   */
+  private final class ReadOnlyTransactionImpl implements WriteableTransaction
+  {
+    private final WriteableTransactionImpl delegate;
+
+    ReadOnlyTransactionImpl(WriteableTransactionImpl delegate)
+    {
+      this.delegate = delegate;
+    }
+
+    @Override
+    public ByteString read(TreeName treeName, ByteSequence key)
+    {
+      return delegate.read(treeName, key);
+    }
+
+    @Override
+    public Cursor<ByteString, ByteString> openCursor(TreeName treeName)
+    {
+      return delegate.openCursor(treeName);
+    }
+
+    @Override
+    public long getRecordCount(TreeName treeName)
+    {
+      return delegate.getRecordCount(treeName);
+    }
+
+    /**
+     * Opens an existing tree. Creating a tree would be a mutation, so
+     * {@code createOnDemand} is rejected in read-only mode.
+     */
+    @Override
+    public void openTree(TreeName treeName, boolean createOnDemand)
+    {
+      if (createOnDemand)
+      {
+        throw new ReadOnlyStorageException();
+      }
+      delegate.openTree(treeName, false);
+    }
+
+    @Override
+    public void deleteTree(TreeName name)
+    {
+      throw new ReadOnlyStorageException();
+    }
+
+    @Override
+    public void put(TreeName treeName, ByteSequence key, ByteSequence value)
+    {
+      throw new ReadOnlyStorageException();
+    }
+
+    @Override
+    public boolean update(TreeName treeName, ByteSequence key, UpdateFunction f)
+    {
+      throw new ReadOnlyStorageException();
+    }
+
+    @Override
+    public boolean delete(TreeName treeName, ByteSequence key)
+    {
+      throw new ReadOnlyStorageException();
+    }
+  }
+
+  /**
+   * Wraps the given JE transaction: writeable storages get the full implementation,
+   * read-only storages get a guard that rejects all mutations.
+   */
+  private WriteableTransaction newWriteableTransaction(Transaction txn)
+  {
+    final WriteableTransactionImpl writeableTxn = new WriteableTransactionImpl(txn);
+    if (accessMode.isWriteable())
+    {
+      return writeableTxn;
+    }
+    return new ReadOnlyTransactionImpl(writeableTxn);
+  }
+
+  private static final LocalizedLogger logger = LocalizedLogger.getLoggerForThisClass();
+  private final ServerContext serverContext;
+  /** The directory holding this backend's JE environment files. */
+  private final File backendDirectory;
+  private JEBackendCfg config;
+  /** Whether the storage was opened read-only or read-write. */
+  private AccessMode accessMode;
+
+  /** The JE environment; {@code null} while the storage is closed. */
+  private Environment env;
+  private EnvironmentConfig envConfig;
+  /** Server-wide quota from which the DB cache memory is reserved/released. */
+  private MemoryQuota memQuota;
+  private JEMonitor monitor;
+  private DiskSpaceMonitor diskMonitor;
+  private StorageStatus storageStatus = StorageStatus.working();
+  /** Cache of opened JE databases, keyed by tree name; shared by all transactions. */
+  private final ConcurrentMap<TreeName, Database> trees = new ConcurrentHashMap<>();
+
+  /**
+   * Creates a new JE storage with the provided configuration.
+   *
+   * @param cfg
+   *          The configuration.
+   * @param serverContext
+   *          This server instance context
+   * @throws ConfigException
+   *           if memory cannot be reserved
+   */
+  JEStorage(final JEBackendCfg cfg, ServerContext serverContext) throws ConfigException
+  {
+    this.serverContext = serverContext;
+    backendDirectory = new File(getFileForPath(cfg.getDBDirectory()), cfg.getBackendId());
+    config = cfg;
+    // Listen for configuration changes; the listener is removed in close().
+    cfg.addJEChangeListener(this);
+  }
+
+  /**
+   * Returns the JE database for the given tree, opening and caching it on first use.
+   * Uses a check / lock / re-check pattern synchronized on the map itself so a tree
+   * is opened at most once.
+   * <p>
+   * NOTE(review): the importer passes a plain {@code HashMap} here while the main
+   * path passes the {@code ConcurrentHashMap} field — the unsynchronized first read
+   * is only safe for the former if the importer is single-threaded; TODO confirm.
+   */
+  private Database getOrOpenTree0(Transaction txn, Map<TreeName, Database> trees, TreeName treeName)
+  {
+    Database tree = trees.get(treeName);
+    if (tree == null)
+    {
+      synchronized (trees)
+      {
+        tree = trees.get(treeName);
+        if (tree == null)
+        {
+          tree = env.openDatabase(txn, mangleTreeName(treeName), dbConfig());
+          trees.put(treeName, tree);
+        }
+      }
+    }
+    return tree;
+  }
+
+  /**
+   * Builds the JE environment configuration and reserves the DB cache memory quota.
+   * Imports use a dedicated non-transactional, no-sync configuration tuned for bulk
+   * load; normal opens derive the configuration from the backend config entry.
+   *
+   * @param accessMode whether the storage will be writeable
+   * @param isImport whether this configuration is for an import session
+   * @throws ConfigException if the configuration entry cannot be parsed
+   */
+  private void buildConfiguration(AccessMode accessMode, boolean isImport) throws ConfigException
+  {
+    this.accessMode = accessMode;
+
+    if (isImport)
+    {
+      envConfig = new EnvironmentConfig();
+      envConfig
+        .setTransactional(false)
+        .setAllowCreate(true)
+        .setLockTimeout(0, TimeUnit.SECONDS)
+        .setTxnTimeout(0, TimeUnit.SECONDS)
+        .setDurability(Durability.COMMIT_NO_SYNC)
+        .setConfigParam(CLEANER_MIN_FILE_UTILIZATION, String.valueOf(config.getDBCleanerMinUtilization()))
+        .setConfigParam(LOG_FILE_MAX, String.valueOf(config.getDBLogFileMax()));
+    }
+    else
+    {
+      envConfig = ConfigurableEnvironment.parseConfigEntry(config);
+    }
+
+    diskMonitor = serverContext.getDiskSpaceMonitor();
+    memQuota = serverContext.getMemoryQuota();
+    // Reserve either the explicit cache size or the configured percentage of heap;
+    // the matching release happens in close().
+    if (config.getDBCacheSize() > 0)
+    {
+      memQuota.acquireMemory(config.getDBCacheSize());
+    }
+    else
+    {
+      memQuota.acquireMemory(memQuota.memPercentToBytes(config.getDBCachePercent()));
+    }
+  }
+
+  private DatabaseConfig dbConfig()
+  {
+    boolean isImport = !envConfig.getTransactional();
+    return new DatabaseConfig()
+      .setKeyPrefixing(true)
+      .setAllowCreate(true)
+      .setTransactional(!isImport)
+      .setDeferredWrite(isImport);
+  }
+
+  /**
+   * Closes the storage: closes all cached databases, deregisters the monitor, closes
+   * the JE environment, releases the reserved memory quota, and deregisters the
+   * configuration-change listener and disk-space monitoring.
+   */
+  @Override
+  public void close()
+  {
+    synchronized (trees)
+    {
+      closeSilently(trees.values());
+      trees.clear();
+    }
+
+    if (env != null)
+    {
+      DirectoryServer.deregisterMonitorProvider(monitor);
+      monitor = null;
+      try
+      {
+        env.close();
+        env = null;
+      }
+      catch (DatabaseException e)
+      {
+        throw new IllegalStateException(e);
+      }
+    }
+
+    // Release exactly what buildConfiguration() acquired.
+    if (config.getDBCacheSize() > 0)
+    {
+      memQuota.releaseMemory(config.getDBCacheSize());
+    }
+    else
+    {
+      memQuota.releaseMemory(memQuota.memPercentToBytes(config.getDBCachePercent()));
+    }
+    config.removeJEChangeListener(this);
+    diskMonitor.deregisterMonitoredDirectory(getDirectory(), this);
+  }
+
+  /**
+   * Opens the storage for normal (non-import) operation.
+   *
+   * @param accessMode whether to open read-only or read-write; must not be null
+   * @throws ConfigException if the configuration is invalid or storage files cannot be set up
+   * @throws StorageRuntimeException if the JE environment cannot be opened
+   */
+  @Override
+  public void open(AccessMode accessMode) throws ConfigException, StorageRuntimeException
+  {
+    Reject.ifNull(accessMode, "accessMode must not be null");
+    buildConfiguration(accessMode, false);
+    open0();
+  }
+
+  /**
+   * Opens the JE environment using the already-built configuration, registers the JE
+   * monitor provider and starts disk-space monitoring of the backend directory.
+   */
+  private void open0() throws ConfigException
+  {
+    setupStorageFiles();
+    try
+    {
+      if (env != null)
+      {
+        throw new IllegalStateException(
+            "Database is already open, either the backend is enabled or an import is currently running.");
+      }
+      env = new Environment(backendDirectory, envConfig);
+      monitor = new JEMonitor(config.getBackendId() + " JE Database", env);
+      DirectoryServer.registerMonitorProvider(monitor);
+    }
+    catch (DatabaseException e)
+    {
+      throw new StorageRuntimeException(e);
+    }
+    diskMonitor.registerMonitoredDirectory(
+        config.getBackendId() + " backend",
+        getDirectory(),
+        config.getDiskLowThreshold(),
+        config.getDiskFullThreshold(),
+        this);
+  }
+
+  /**
+   * Runs a read operation inside its own JE transaction, committing on success.
+   * A {@link StorageRuntimeException} with a cause is unwrapped and its cause
+   * rethrown.
+   * <p>
+   * NOTE(review): abort() in the finally block also runs after a successful commit —
+   * presumably JE treats abort of a committed transaction as a no-op; TODO confirm.
+   */
+  @Override
+  public <T> T read(final ReadOperation<T> operation) throws Exception
+  {
+    final Transaction txn = beginTransaction();
+    try
+    {
+      final T result = operation.run(newWriteableTransaction(txn));
+      commit(txn);
+      return result;
+    }
+    catch (final StorageRuntimeException e)
+    {
+      if (e.getCause() != null)
+      {
+        throw (Exception) e.getCause();
+      }
+      throw e;
+    }
+    finally
+    {
+      abort(txn);
+    }
+  }
+
+  /**
+   * Opens the storage in import mode (non-transactional, deferred-write, no-sync)
+   * and returns the importer. Closing the importer also closes this storage.
+   */
+  @Override
+  public Importer startImport() throws ConfigException, StorageRuntimeException
+  {
+    buildConfiguration(AccessMode.READ_WRITE, true);
+    open0();
+    return new ImporterImpl();
+  }
+
+  /**
+   * Converts a tree name into a JE database name by replacing every '=' and ','
+   * (characters that appear in DN-based tree names) with '_'.
+   *
+   * @param treeName the tree name to mangle
+   * @return the mangled database name
+   */
+  private static String mangleTreeName(final TreeName treeName)
+  {
+    // String.replace(char, char) does exactly what the previous manual
+    // character-by-character loop did, in one pass per character class.
+    return treeName.toString().replace('=', '_').replace(',', '_');
+  }
+
+  /**
+   * Runs a write operation inside its own JE transaction, committing on success.
+   * A {@link StorageRuntimeException} with a cause is unwrapped and its cause
+   * rethrown.
+   * <p>
+   * NOTE(review): abort() in the finally block also runs after a successful commit —
+   * presumably JE treats abort of a committed transaction as a no-op; TODO confirm.
+   */
+  @Override
+  public void write(final WriteOperation operation) throws Exception
+  {
+    final Transaction txn = beginTransaction();
+    try
+    {
+      operation.run(newWriteableTransaction(txn));
+      commit(txn);
+    }
+    catch (final StorageRuntimeException e)
+    {
+      if (e.getCause() != null)
+      {
+        throw (Exception) e.getCause();
+      }
+      throw e;
+    }
+    finally
+    {
+      abort(txn);
+    }
+  }
+
+  /**
+   * Begins a JE transaction when the environment is transactional.
+   *
+   * @return the new transaction, or {@code null} when the environment is
+   *         non-transactional (import mode)
+   */
+  private Transaction beginTransaction()
+  {
+    if (envConfig.getTransactional())
+    {
+      final Transaction txn = env.beginTransaction(null, new TransactionConfig());
+      // Use the same parameterized "<op> txnid=%d" form as commit()/abort();
+      // the previous two-string call passed the txn id as an ignored argument.
+      logger.trace("begin txnid=%d", txn.getId());
+      return txn;
+    }
+    return null;
+  }
+
+  /** Commits the given transaction; no-op when the environment is non-transactional. */
+  private void commit(final Transaction txn)
+  {
+    if (envConfig.getTransactional())
+    {
+      txn.commit();
+      logger.trace("commit txnid=%d", txn.getId());
+    }
+  }
+
+  /** Aborts the given transaction; no-op when the environment is non-transactional. */
+  private void abort(final Transaction txn)
+  {
+    if (envConfig.getTransactional())
+    {
+      txn.abort();
+      logger.trace("abort txnid=%d", txn.getId());
+    }
+  }
+
+  /** JE supports backup and restore through {@link BackupManager}. */
+  @Override
+  public boolean supportsBackupAndRestore()
+  {
+    return true;
+  }
+
+  /** Returns the backend database directory: {@code <db-directory>/<backend-id>}. */
+  @Override
+  public File getDirectory()
+  {
+    File parentDir = getFileForPath(config.getDBDirectory());
+    return new File(parentDir, config.getBackendId());
+  }
+
+  /** Returns an iterator over the JE log files to include in a backup. */
+  @Override
+  public ListIterator<Path> getFilesToBackup() throws DirectoryException
+  {
+    return new JELogFilesIterator(getDirectory(), config.getBackendId());
+  }
+
+  /**
+   * Iterator on JE log files to backup.
+   * <p>
+   * The cleaner thread may delete some log files during the backup. The iterator is automatically
+   * renewed if at least one file has been deleted.
+   */
+  static class JELogFilesIterator implements ListIterator<Path>
+  {
+    /** Root directory where all files are located. */
+    private final File rootDirectory;
+    private final String backendID;
+
+    /** Underlying iterator on files. */
+    private ListIterator<Path> iterator;
+    /** Files to backup. Used to renew the iterator if necessary. */
+    private List<Path> files;
+
+    /** Name and size of the last (sorted) file, used as the renewal starting point. */
+    private String lastFileName = "";
+    private long lastFileSize;
+
+    JELogFilesIterator(File rootDirectory, String backendID) throws DirectoryException
+    {
+      this.rootDirectory = rootDirectory;
+      this.backendID = backendID;
+      setFiles(BackupManager.getFiles(rootDirectory, new JELogFileFilter(), backendID));
+    }
+
+    /** Sorts and installs the file list, remembering the last file's name and size. */
+    private void setFiles(List<Path> files)
+    {
+      this.files = files;
+      Collections.sort(files);
+      if (!files.isEmpty())
+      {
+        Path lastFile = files.get(files.size() - 1);
+        lastFileName = lastFile.getFileName().toString();
+        lastFileSize = lastFile.toFile().length();
+      }
+      iterator = files.listIterator();
+    }
+
+    /**
+     * Returns whether more files remain. When the current list is exhausted, re-scans
+     * the directory: if the cleaner deleted any file we had listed, the iterator is
+     * renewed with files newer than (or grown beyond) the last one seen.
+     */
+    @Override
+    public boolean hasNext()
+    {
+      boolean hasNext = iterator.hasNext();
+      if (!hasNext && !files.isEmpty())
+      {
+        try
+        {
+          List<Path> allFiles = BackupManager.getFiles(rootDirectory, new JELogFileFilter(), backendID);
+          List<Path> compare = new ArrayList<>(files);
+          compare.removeAll(allFiles);
+          if (!compare.isEmpty())
+          {
+            // at least one file was deleted,
+            // the iterator must be renewed based on last file previously available
+            List<Path> newFiles =
+                BackupManager.getFiles(rootDirectory, new JELogFileFilter(lastFileName, lastFileSize), backendID);
+            logger.info(NOTE_JEB_BACKUP_CLEANER_ACTIVITY.get(newFiles.size()));
+            if (!newFiles.isEmpty())
+            {
+              setFiles(newFiles);
+              hasNext = iterator.hasNext();
+            }
+          }
+        }
+        catch (DirectoryException e)
+        {
+          logger.error(ERR_BACKEND_LIST_FILES_TO_BACKUP.get(backendID, stackTraceToSingleLineString(e)));
+        }
+      }
+      return hasNext;
+    }
+
+    @Override
+    public Path next()
+    {
+      // Go through hasNext() so the renewal logic runs before fetching.
+      if (hasNext())
+      {
+        return iterator.next();
+      }
+      throw new NoSuchElementException();
+    }
+
+    @Override
+    public boolean hasPrevious()
+    {
+      return iterator.hasPrevious();
+    }
+
+    @Override
+    public Path previous()
+    {
+      return iterator.previous();
+    }
+
+    @Override
+    public int nextIndex()
+    {
+      return iterator.nextIndex();
+    }
+
+    @Override
+    public int previousIndex()
+    {
+      return iterator.previousIndex();
+    }
+
+    @Override
+    public void remove()
+    {
+      throw new UnsupportedOperationException("remove() is not implemented");
+    }
+
+    @Override
+    public void set(Path e)
+    {
+      throw new UnsupportedOperationException("set() is not implemented");
+    }
+
+    @Override
+    public void add(Path e)
+    {
+      throw new UnsupportedOperationException("add() is not implemented");
+    }
+  }
+
+  /**
+   * A {@link FileFilter} that accepts JE log files ({@code *.jdb}), optionally
+   * constrained to files newer than a given file name, or the same file grown in size.
+   */
+  private static class JELogFileFilter implements FileFilter
+  {
+    private final String latestFilename;
+    private final long latestFileSize;
+
+    /**
+     * Creates the filter for log files that are newer than provided file name
+     * or equal to provided file name and of larger size.
+     */
+    JELogFileFilter(String latestFilename, long latestFileSize)
+    {
+      this.latestFilename = latestFilename;
+      this.latestFileSize = latestFileSize;
+    }
+
+    /** Creates the filter for any JE log file. */
+    JELogFileFilter()
+    {
+      this("", 0);
+    }
+
+    @Override
+    public boolean accept(File file)
+    {
+      String name = file.getName();
+      // Lexicographic comparison: JE log file names are sequentially numbered.
+      int cmp = name.compareTo(latestFilename);
+      return name.endsWith(".jdb")
+          && (cmp > 0 || (cmp == 0 && file.length() > latestFileSize));
+    }
+  }
+
+  /** Nothing to prepare before a restore; returns {@code null}. */
+  @Override
+  public Path beforeRestore() throws DirectoryException
+  {
+    return null;
+  }
+
+  /** Restore goes through an intermediate directory, not directly into place. */
+  @Override
+  public boolean isDirectRestore()
+  {
+    // restore is done in an intermediate directory
+    return false;
+  }
+
+  /**
+   * Finishes a restore by replacing the backend database directory with the
+   * intermediate restore directory.
+   *
+   * @param restoreDirectory the intermediate directory holding the restored files
+   * @param saveDirectory the save directory (unused by this implementation)
+   * @throws DirectoryException if the restored directory cannot be moved into place
+   */
+  @Override
+  public void afterRestore(Path restoreDirectory, Path saveDirectory) throws DirectoryException
+  {
+    // intermediate directory content is moved to database directory
+    File targetDirectory = getDirectory();
+    recursiveDelete(targetDirectory);
+    try
+    {
+      Files.move(restoreDirectory, targetDirectory.toPath());
+    }
+    catch (IOException e)
+    {
+      LocalizableMessage msg = ERR_CANNOT_RENAME_RESTORE_DIRECTORY.get(restoreDirectory, targetDirectory.getPath());
+      // Preserve the underlying I/O failure as the cause instead of discarding it.
+      throw new DirectoryException(DirectoryServer.getServerErrorResultCode(), msg, e);
+    }
+  }
+
+  /** Creates a backup of this backend, delegating to {@link BackupManager}. */
+  @Override
+  public void createBackup(BackupConfig backupConfig) throws DirectoryException
+  {
+    new BackupManager(config.getBackendId()).createBackup(this, backupConfig);
+  }
+
+  /** Removes the identified backup, delegating to {@link BackupManager}. */
+  @Override
+  public void removeBackup(BackupDirectory backupDirectory, String backupID) throws DirectoryException
+  {
+    new BackupManager(config.getBackendId()).removeBackup(backupDirectory, backupID);
+  }
+
+  /** Restores a backup of this backend, delegating to {@link BackupManager}. */
+  @Override
+  public void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException
+  {
+    new BackupManager(config.getBackendId()).restoreBackup(this, restoreConfig);
+  }
+
+  /** Returns the names of all trees (JE databases) present in the environment. */
+  @Override
+  public Set<TreeName> listTrees()
+  {
+    try
+    {
+      final List<String> dbNames = env.getDatabaseNames();
+      final Set<TreeName> treeNames = new HashSet<>(dbNames.size());
+      for (final String dbName : dbNames)
+      {
+        treeNames.add(TreeName.valueOf(dbName));
+      }
+      return treeNames;
+    }
+    catch (DatabaseException e)
+    {
+      throw new StorageRuntimeException(e);
+    }
+  }
+
+  /**
+   * Accepts a configuration change when any cache-size increase fits in the memory
+   * quota and the configured directories are valid.
+   */
+  @Override
+  public boolean isConfigurationChangeAcceptable(JEBackendCfg newCfg,
+      List<LocalizableMessage> unacceptableReasons)
+  {
+    long newSize = computeSize(newCfg);
+    long oldSize = computeSize(config);
+    // Only the delta needs to be available since the old reservation is kept.
+    return (newSize <= oldSize || memQuota.isMemoryAvailable(newSize - oldSize))
+        && checkConfigurationDirectories(newCfg, unacceptableReasons);
+  }
+
+  /** Returns the cache size in bytes: explicit size if set, else percent of heap. */
+  private long computeSize(JEBackendCfg cfg)
+  {
+    return cfg.getDBCacheSize() > 0 ? cfg.getDBCacheSize() : memQuota.memPercentToBytes(cfg.getDBCachePercent());
+  }
+
+  /**
+   * Checks newly created backend has a valid configuration.
+   * @param cfg the new configuration
+   * @param unacceptableReasons the list of accumulated errors and their messages
+   * @param context this server instance context, or {@code null} to skip the memory checks
+   * @return true if newly created backend has a valid configuration
+   */
+  static boolean isConfigurationAcceptable(JEBackendCfg cfg, List<LocalizableMessage> unacceptableReasons,
+      ServerContext context)
+  {
+    if (context != null)
+    {
+      MemoryQuota memQuota = context.getMemoryQuota();
+      // Explicit cache size takes precedence over the percent-of-heap setting.
+      if (cfg.getDBCacheSize() > 0 && !memQuota.isMemoryAvailable(cfg.getDBCacheSize()))
+      {
+        unacceptableReasons.add(ERR_BACKEND_CONFIG_CACHE_SIZE_GREATER_THAN_JVM_HEAP.get(
+            cfg.getDBCacheSize(), memQuota.getAvailableMemory()));
+        return false;
+      }
+      else if (!memQuota.isMemoryAvailable(memQuota.memPercentToBytes(cfg.getDBCachePercent())))
+      {
+        unacceptableReasons.add(ERR_BACKEND_CONFIG_CACHE_PERCENT_GREATER_THAN_JVM_HEAP.get(
+            cfg.getDBCachePercent(), memQuota.memBytesToPercent(memQuota.getAvailableMemory())));
+        return false;
+      }
+    }
+    return checkConfigurationDirectories(cfg, unacceptableReasons);
+  }
+
+  /**
+   * Checks that the backend directory derived from the configuration either exists or can be
+   * created, and that the configured directory permissions are sane.
+   *
+   * @param cfg the backend configuration to validate
+   * @param unacceptableReasons receives all accumulated error messages on failure
+   * @return true when the directories and permissions are valid, false otherwise
+   */
+  private static boolean checkConfigurationDirectories(JEBackendCfg cfg,
+    List<LocalizableMessage> unacceptableReasons)
+  {
+    final ConfigChangeResult ccr = new ConfigChangeResult();
+    File parentDirectory = getFileForPath(cfg.getDBDirectory());
+    File newBackendDirectory = new File(parentDirectory, cfg.getBackendId());
+
+    // cleanup=true: any directory created purely for this validation is removed again.
+    checkDBDirExistsOrCanCreate(newBackendDirectory, ccr, true);
+    checkDBDirPermissions(cfg, ccr);
+    if (!ccr.getMessages().isEmpty())
+    {
+      unacceptableReasons.addAll(ccr.getMessages());
+      return false;
+    }
+    return true;
+  }
+
+  /**
+   * Checks that a directory exists, or can actually be created, accumulating an error message
+   * into the given change result otherwise.
+   *
+   * @param backendDir the directory to check for
+   * @param ccr collects an error message when the directory is invalid or cannot be created;
+   *        must not be null (a null would fail inside addErrorMessage())
+   * @param cleanup true if a directory created purely for this check should be deleted afterwards
+   */
+  private static void checkDBDirExistsOrCanCreate(File backendDir, ConfigChangeResult ccr, boolean cleanup)
+  {
+    if (!backendDir.exists())
+    {
+      if(!backendDir.mkdirs())
+      {
+        addErrorMessage(ccr, ERR_CREATE_FAIL.get(backendDir.getPath()));
+      }
+      if (cleanup)
+      {
+        // The directory was only created to prove that it could be: remove it again.
+        backendDir.delete();
+      }
+    }
+    else if (!backendDir.isDirectory())
+    {
+      addErrorMessage(ccr, ERR_DIRECTORY_INVALID.get(backendDir.getPath()));
+    }
+  }
+
+  /**
+   * Checks that the DB directory permissions in the configuration are valid, accumulating an
+   * error message into the given change result otherwise.
+   *
+   * @param cfg a (possibly new) backend configuration
+   * @param ccr the current change result; receives an error when the configured mode cannot be
+   *        decoded or does not grant the server owner read, write and execute access
+   */
+  private static void checkDBDirPermissions(JEBackendCfg cfg, ConfigChangeResult ccr)
+  {
+    try
+    {
+      FilePermission backendPermission = decodeDBDirPermissions(cfg);
+      // Make sure the mode will allow the server itself access to the database
+      if(!backendPermission.isOwnerWritable() ||
+          !backendPermission.isOwnerReadable() ||
+          !backendPermission.isOwnerExecutable())
+      {
+        addErrorMessage(ccr, ERR_CONFIG_BACKEND_INSANE_MODE.get(cfg.getDBDirectoryPermissions()));
+      }
+    }
+    catch(ConfigException ce)
+    {
+      addErrorMessage(ccr, ce.getMessageObject());
+    }
+  }
+
+  /**
+   * Sets file permissions on the backend directory. Failure to apply the permissions is only
+   * logged as a warning, not treated as an error.
+   *
+   * @param curCfg a backend configuration
+   * @param backendDir the directory to setup
+   * @throws ConfigException if the configured permission mode cannot be decoded
+   */
+  private void setDBDirPermissions(JEBackendCfg curCfg, File backendDir) throws ConfigException
+  {
+    FilePermission backendPermission = decodeDBDirPermissions(curCfg);
+
+    // Get the backend database backendDirectory permissions and apply
+    try
+    {
+      if(!FilePermission.setPermissions(backendDir, backendPermission))
+      {
+        logger.warn(WARN_UNABLE_SET_PERMISSIONS, backendPermission, backendDir);
+      }
+    }
+    catch(Exception e)
+    {
+      // Log a warning that the permissions were not set.
+      logger.warn(WARN_SET_PERMISSIONS_FAILED, backendDir, e);
+    }
+  }
+
+  /**
+   * Decodes the configured UNIX permission mode for the backend directory.
+   *
+   * @param curCfg the backend configuration holding the permission string
+   * @return the decoded file permission
+   * @throws ConfigException if the configured mode cannot be parsed
+   */
+  private static FilePermission decodeDBDirPermissions(JEBackendCfg curCfg) throws ConfigException
+  {
+    try
+    {
+      return FilePermission.decodeUNIXMode(curCfg.getDBDirectoryPermissions());
+    }
+    catch (Exception e)
+    {
+      // Preserve the parse failure as the cause instead of discarding it.
+      throw new ConfigException(ERR_CONFIG_BACKEND_MODE_INVALID.get(curCfg.dn()), e);
+    }
+  }
+
+  /**
+   * Applies a configuration change: validates and prepares a new DB directory and/or directory
+   * permissions when they changed, re-registers the disk space monitor with the (possibly new)
+   * thresholds, and finally swaps in the new configuration.
+   * <p>
+   * A DB directory change takes effect only after a restart, which is signalled through
+   * {@code setAdminActionRequired(true)}.
+   *
+   * @param cfg the new configuration to apply
+   * @return the change result, carrying an error result code and messages on failure
+   */
+  @Override
+  public ConfigChangeResult applyConfigurationChange(JEBackendCfg cfg)
+  {
+    final ConfigChangeResult ccr = new ConfigChangeResult();
+
+    try
+    {
+      File parentDirectory = getFileForPath(cfg.getDBDirectory());
+      File newBackendDirectory = new File(parentDirectory, cfg.getBackendId());
+
+      // Create the directory if it doesn't exist.
+      if(!cfg.getDBDirectory().equals(config.getDBDirectory()))
+      {
+        // cleanup=false: the new directory is kept, it will be used after the restart.
+        checkDBDirExistsOrCanCreate(newBackendDirectory, ccr, false);
+        if (!ccr.getMessages().isEmpty())
+        {
+          return ccr;
+        }
+
+        ccr.setAdminActionRequired(true);
+        ccr.addMessage(NOTE_CONFIG_DB_DIR_REQUIRES_RESTART.get(config.getDBDirectory(), cfg.getDBDirectory()));
+      }
+
+      // Re-apply permissions when either the mode or the target directory changed.
+      if (!cfg.getDBDirectoryPermissions().equalsIgnoreCase(config.getDBDirectoryPermissions())
+          || !cfg.getDBDirectory().equals(config.getDBDirectory()))
+      {
+        checkDBDirPermissions(cfg, ccr);
+        if (!ccr.getMessages().isEmpty())
+        {
+          return ccr;
+        }
+
+        setDBDirPermissions(cfg, newBackendDirectory);
+      }
+      // Refresh the disk monitor registration so the new thresholds take effect immediately.
+      diskMonitor.registerMonitoredDirectory(
+        config.getBackendId() + " backend",
+        getDirectory(),
+        cfg.getDiskLowThreshold(),
+        cfg.getDiskFullThreshold(),
+        this);
+      config = cfg;
+    }
+    catch (Exception e)
+    {
+      addErrorMessage(ccr, LocalizableMessage.raw(stackTraceToSingleLineString(e)));
+    }
+    return ccr;
+  }
+
+  /** Records the message on the change result and flags it with the server error result code. */
+  private static void addErrorMessage(final ConfigChangeResult ccr, LocalizableMessage message)
+  {
+    ccr.setResultCode(DirectoryServer.getServerErrorResultCode());
+    ccr.addMessage(message);
+  }
+
+  /**
+   * Ensures the backend directory exists with valid, applied permissions, failing fast with a
+   * ConfigException built from the first accumulated error message.
+   *
+   * @throws ConfigException if the directory cannot be created or validated, or if the configured
+   *         permissions are invalid
+   */
+  private void setupStorageFiles() throws ConfigException
+  {
+    ConfigChangeResult ccr = new ConfigChangeResult();
+
+    // cleanup=false: the directory is created for real use, not just for validation.
+    checkDBDirExistsOrCanCreate(backendDirectory, ccr, false);
+    if (!ccr.getMessages().isEmpty())
+    {
+      throw new ConfigException(ccr.getMessages().get(0));
+    }
+    checkDBDirPermissions(config, ccr);
+    if (!ccr.getMessages().isEmpty())
+    {
+      throw new ConfigException(ccr.getMessages().get(0));
+    }
+    setDBDirPermissions(config, backendDirectory);
+  }
+
+  /**
+   * Deletes all files inside the backend directory, leaving the directory itself in place.
+   * A non-existent directory is treated as already removed.
+   *
+   * @throws StorageRuntimeException if the path is not a directory, cannot be listed, or an
+   *         unexpected error occurs while deleting
+   */
+  @Override
+  public void removeStorageFiles() throws StorageRuntimeException
+  {
+    if (!backendDirectory.exists())
+    {
+      return;
+    }
+
+    if (!backendDirectory.isDirectory())
+    {
+      throw new StorageRuntimeException(ERR_DIRECTORY_INVALID.get(backendDirectory.getPath()).toString());
+    }
+
+    try
+    {
+      File[] files = backendDirectory.listFiles();
+      if (files == null)
+      {
+        // listFiles() returns null on an I/O error: report it instead of throwing an NPE below.
+        throw new StorageRuntimeException(ERR_REMOVE_FAIL.get(backendDirectory.getPath()).toString());
+      }
+      for (File f : files)
+      {
+        f.delete();
+      }
+    }
+    catch (Exception e)
+    {
+      logger.traceException(e);
+      throw new StorageRuntimeException(ERR_REMOVE_FAIL.get(e.getMessage()).toString(), e);
+    }
+  }
+
+  /** Returns the current storage status as updated by the disk space monitor callbacks. */
+  @Override
+  public StorageStatus getStorageStatus()
+  {
+    return storageStatus;
+  }
+
+  /** Marks the storage unusable once the disk full threshold is crossed. */
+  @Override
+  public void diskFullThresholdReached(File directory, long thresholdInBytes) {
+    storageStatus = StorageStatus.unusable(
+        WARN_DISK_SPACE_FULL_THRESHOLD_CROSSED.get(directory.getFreeSpace(), directory.getAbsolutePath(),
+        thresholdInBytes, config.getBackendId()));
+  }
+
+  /** Locks down the storage once the disk low space threshold is crossed. */
+  @Override
+  public void diskLowThresholdReached(File directory, long thresholdInBytes) {
+    storageStatus = StorageStatus.lockedDown(
+        WARN_DISK_SPACE_LOW_THRESHOLD_CROSSED.get(directory.getFreeSpace(), directory.getAbsolutePath(),
+        thresholdInBytes, config.getBackendId()));
+  }
+
+  /** Restores normal operation once enough disk space is available again. */
+  @Override
+  public void diskSpaceRestored(File directory, long lowThresholdInBytes, long fullThresholdInBytes) {
+    storageStatus = StorageStatus.working();
+  }
+
+  /** Copies the given bytes into the JE entry, or clears its data when the sequence is null. */
+  private static void setData(final DatabaseEntry dbEntry, final ByteSequence bs)
+  {
+    dbEntry.setData(bs != null ? bs.toByteArray() : null);
+  }
+
+  /** Wraps the given bytes in a new JE DatabaseEntry; a null sequence yields an entry with null data. */
+  private static DatabaseEntry db(final ByteSequence bs)
+  {
+    return new DatabaseEntry(bs != null ? bs.toByteArray() : null);
+  }
+
+  /**
+   * Converts a JE entry value to a ByteString.
+   *
+   * @param dbValue the JE entry whose data to wrap
+   * @param isDefined whether the preceding read actually produced a value
+   * @return the wrapped data, or null when the value is undefined
+   */
+  private static ByteString valueToBytes(final DatabaseEntry dbValue, boolean isDefined)
+  {
+    if (isDefined)
+    {
+      return ByteString.wrap(dbValue.getData());
+    }
+    return null;
+  }
+}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBMonitor.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBMonitor.java
index d63c71b..04d8473 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBMonitor.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBMonitor.java
@@ -25,25 +25,26 @@
  */
 package org.opends.server.backends.pdb;
 
+import static org.opends.server.util.StaticUtils.*;
+
 import java.lang.reflect.Method;
 import java.rmi.RemoteException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 
-import org.forgerock.opendj.config.server.ConfigException;
 import org.opends.server.admin.std.server.MonitorProviderCfg;
 import org.opends.server.api.MonitorProvider;
-import org.opends.server.types.*;
+import org.opends.server.types.Attribute;
+import org.opends.server.types.Attributes;
 
-import com.persistit.Management.*;
+import com.persistit.Management.BufferPoolInfo;
+import com.persistit.Management.TreeInfo;
+import com.persistit.Management.VolumeInfo;
 import com.persistit.Management.WrappedRemoteException;
 import com.persistit.Persistit;
 
-/**
- * Monitoring Class for PDB, populating cn=monitor statistics using reflection on objects
- * methods.
- */
+/** Monitoring class for PDB, populating cn=monitor statistics using reflection on objects methods. */
 class PDBMonitor extends MonitorProvider<MonitorProviderCfg>
 {
   private final String name;
@@ -56,13 +57,6 @@
   }
 
   @Override
-  public void initializeMonitorProvider(MonitorProviderCfg configuration) throws ConfigException,
-      InitializationException
-  {
-    return;
-  }
-
-  @Override
   public String getMonitorInstanceName()
   {
     return name;
@@ -88,18 +82,18 @@
         for (TreeInfo tree : db.getManagement().getTreeInfoArray(vol.getName()))
         {
           // For the time being, depth is not reported.
-          monitorAttrs.add(Attributes.create("PDBVolumeTree", vol.getName() + tree.getName() +
-              ", traverse=" + tree.getTraverseCounter() +
-              ", fetch=" + tree.getFetchCounter() +
-              ", store=" + tree.getStoreCounter() +
-              ", remove=" + tree.getRemoveCounter()));
+          monitorAttrs.add(Attributes.create("PDBVolumeTree", vol.getName() + tree.getName()
+              + ", traverse=" + tree.getTraverseCounter()
+              + ", fetch=" + tree.getFetchCounter()
+              + ", store=" + tree.getStoreCounter()
+              + ", remove=" + tree.getRemoveCounter()));
         }
       }
       return monitorAttrs;
     }
-    catch (RemoteException re)
+    catch (RemoteException e)
     {
-      return Collections.singletonList(Attributes.create("PDBInfo", re.getStackTrace().toString()));
+      return Collections.singletonList(Attributes.create("PDBInfo", stackTraceToSingleLineString(e)));
     }
   }
 
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBStorage.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBStorage.java
index 41d57a5..36f5b74 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBStorage.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pdb/PDBStorage.java
@@ -111,11 +111,8 @@
   private static final int IMPORT_DB_CACHE_SIZE = 4 * MB;
 
   private static final double MAX_SLEEP_ON_RETRY_MS = 50.0;
-
   private static final String VOLUME_NAME = "dj";
-
   private static final String JOURNAL_NAME = VOLUME_NAME + "_journal";
-
   /** The buffer / page size used by the PersistIt storage. */
   private static final int BUFFER_SIZE = 16 * 1024;
 
@@ -139,7 +136,8 @@
     }
 
     @Override
-    public boolean isDefined() {
+    public boolean isDefined()
+    {
       return exchange.getValue().isDefined();
     }
 
@@ -255,8 +253,10 @@
       currentValue = null;
     }
 
-    private void throwIfUndefined() {
-      if (!isDefined()) {
+    private void throwIfUndefined()
+    {
+      if (!isDefined())
+      {
         throw new NoSuchElementException();
       }
     }
@@ -413,8 +413,8 @@
   /** PersistIt implementation of the {@link WriteableTransaction} interface. */
   private final class WriteableStorageImpl implements StorageImpl
   {
+    private static final String DUMMY_RECORD = "_DUMMY_RECORD_";
     private final Map<TreeName, Exchange> exchanges = new HashMap<>();
-    private final String DUMMY_RECORD = "_DUMMY_RECORD_";
 
     @Override
     public void put(final TreeName treeName, final ByteSequence key, final ByteSequence value)
@@ -713,7 +713,7 @@
   private Volume volume;
   private PDBBackendCfg config;
   private DiskSpaceMonitor diskMonitor;
-  private PDBMonitor pdbMonitor;
+  private PDBMonitor monitor;
   private MemoryQuota memQuota;
   private StorageStatus storageStatus = StorageStatus.working();
 
@@ -780,8 +780,8 @@
   {
     if (db != null)
     {
-      DirectoryServer.deregisterMonitorProvider(pdbMonitor);
-      pdbMonitor = null;
+      DirectoryServer.deregisterMonitorProvider(monitor);
+      monitor = null;
       try
       {
         db.close();
@@ -834,8 +834,8 @@
 
       db.initialize();
       volume = db.loadVolume(VOLUME_NAME);
-      pdbMonitor = new PDBMonitor(config.getBackendId() + " PDB Database", db);
-      DirectoryServer.registerMonitorProvider(pdbMonitor);
+      monitor = new PDBMonitor(config.getBackendId() + " PDB Database", db);
+      DirectoryServer.registerMonitorProvider(monitor);
     }
     catch(final InUseException e) {
       throw new StorageInUseException(e);
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/BackendMonitor.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/BackendMonitor.java
index 90e4ee4..b09f3bd 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/BackendMonitor.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/BackendMonitor.java
@@ -29,6 +29,7 @@
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -49,14 +50,15 @@
 class BackendMonitor extends MonitorProvider<MonitorProviderCfg>
 {
   /** Represents the statistical information kept for each search filter. */
-  private static class FilterStats implements Comparable<FilterStats>
+  private static final class FilterStats implements Comparable<FilterStats>
   {
     private volatile LocalizableMessage failureReason = LocalizableMessage.EMPTY;
     private long maxMatchingEntries = -1;
     private final AtomicInteger hits = new AtomicInteger();
 
     @Override
-    public int compareTo(FilterStats that) {
+    public int compareTo(FilterStats that)
+    {
       return this.hits.get() - that.hits.get();
     }
 
@@ -70,9 +72,9 @@
     {
       this.hits.getAndAdd(hitCount);
       this.failureReason = LocalizableMessage.EMPTY;
-      synchronized(this)
+      synchronized (this)
       {
-        if(matchingEntries > maxMatchingEntries)
+        if (matchingEntries > maxMatchingEntries)
         {
           maxMatchingEntries = matchingEntries;
         }
@@ -82,7 +84,6 @@
 
   /** The name of this monitor instance. */
   private final String name;
-
   /** The root container to be monitored. */
   private final RootContainer rootContainer;
 
@@ -107,16 +108,10 @@
 
   @Override
   public void initializeMonitorProvider(MonitorProviderCfg configuration)
-       throws ConfigException, InitializationException
+      throws ConfigException, InitializationException
   {
   }
 
-  /**
-   * Retrieves the name of this monitor provider.  It should be unique among all
-   * monitor providers, including all instances of the same monitor provider.
-   *
-   * @return The name of this monitor provider.
-   */
   @Override
   public String getMonitorInstanceName()
   {
@@ -128,61 +123,69 @@
    * returned to the client if the corresponding monitor entry is requested.
    *
    * @return A set of attributes containing monitor data that should be
-   *         returned to the client if the corresponding monitor entry is
-   *         requested.
+   *         returned to the client if the corresponding monitor entry is requested.
    */
   @Override
-  public ArrayList<Attribute> getMonitorData()
+  public List<Attribute> getMonitorData()
   {
-    ArrayList<Attribute> monitorAttrs = new ArrayList<>();
+    List<Attribute> monitorAttrs = new ArrayList<>();
 
-    AttributeBuilder needReindex = new AttributeBuilder("need-reindex");
-    for(EntryContainer ec : rootContainer.getEntryContainers())
+    AttributeBuilder needReindex = createNeedReindex("need-reindex");
+    if (needReindex.size() > 0)
     {
-      for(Tree tree : ec.listTrees())
+      monitorAttrs.add(needReindex.toAttribute());
+    }
+
+    if (filterUseEnabled)
+    {
+      monitorAttrs.add(createAttribute("filter-use-startTime", startTimeStamp));
+      monitorAttrs.add(createFilterUse("filter-use"));
+      monitorAttrs.add(createAttribute("filter-use-indexed", indexedSearchCount));
+      monitorAttrs.add(createAttribute("filter-use-unindexed", unindexedSearchCount));
+    }
+
+    return monitorAttrs;
+  }
+
+  private AttributeBuilder createNeedReindex(String attrName)
+  {
+    AttributeBuilder needReindex = new AttributeBuilder(attrName);
+    for (EntryContainer ec : rootContainer.getEntryContainers())
+    {
+      for (Tree tree : ec.listTrees())
       {
-        if(tree instanceof Index && !((Index)tree).isTrusted())
+        if (tree instanceof Index && !((Index) tree).isTrusted())
         {
           needReindex.add(tree.getName().toString());
         }
       }
     }
-    if(needReindex.size() > 0)
-    {
-      monitorAttrs.add(needReindex.toAttribute());
-    }
+    return needReindex;
+  }
 
-    if(filterUseEnabled)
-    {
-      monitorAttrs.add(Attributes.create("filter-use-startTime",
-          startTimeStamp));
-      AttributeBuilder builder = new AttributeBuilder("filter-use");
+  private Attribute createFilterUse(String attrName)
+  {
+    AttributeBuilder builder = new AttributeBuilder(attrName);
 
-      StringBuilder stringBuilder = new StringBuilder();
-      synchronized(filterToStats)
+    StringBuilder value = new StringBuilder();
+    synchronized (filterToStats)
+    {
+      for (Map.Entry<SearchFilter, FilterStats> entry : filterToStats.entrySet())
       {
-        for(Map.Entry<SearchFilter, FilterStats> entry :
-            filterToStats.entrySet())
-        {
-          entry.getKey().toString(stringBuilder);
-          stringBuilder.append(" hits:");
-          stringBuilder.append(entry.getValue().hits.get());
-          stringBuilder.append(" maxmatches:");
-          stringBuilder.append(entry.getValue().maxMatchingEntries);
-          stringBuilder.append(" message:");
-          stringBuilder.append(entry.getValue().failureReason);
-          builder.add(stringBuilder.toString());
-          stringBuilder.setLength(0);
-        }
+        entry.getKey().toString(value);
+        value.append(" hits:").append(entry.getValue().hits.get());
+        value.append(" maxmatches:").append(entry.getValue().maxMatchingEntries);
+        value.append(" message:").append(entry.getValue().failureReason);
+        builder.add(value.toString());
+        value.setLength(0);
       }
-      monitorAttrs.add(builder.toAttribute());
-      monitorAttrs.add(Attributes.create("filter-use-indexed",
-          String.valueOf(indexedSearchCount.get())));
-      monitorAttrs.add(Attributes.create("filter-use-unindexed",
-          String.valueOf(unindexedSearchCount.get())));
     }
+    return builder.toAttribute();
+  }
 
-    return monitorAttrs;
+  private Attribute createAttribute(String attrName, Object value)
+  {
+    return Attributes.create(attrName, String.valueOf(value));
   }
 
   /**
@@ -194,17 +197,17 @@
    */
   void updateStats(SearchFilter searchFilter, LocalizableMessage failureMessage)
   {
-    if(!filterUseEnabled)
+    if (!filterUseEnabled)
     {
       return;
     }
 
     FilterStats stats;
-    synchronized(filterToStats)
+    synchronized (filterToStats)
     {
       stats = filterToStats.get(searchFilter);
 
-      if(stats != null)
+      if (stats != null)
       {
         stats.update(1, failureMessage);
       }
@@ -228,17 +231,17 @@
    */
   void updateStats(SearchFilter searchFilter, long matchingEntries)
   {
-    if(!filterUseEnabled)
+    if (!filterUseEnabled)
     {
       return;
     }
 
     FilterStats stats;
-    synchronized(filterToStats)
+    synchronized (filterToStats)
     {
       stats = filterToStats.get(searchFilter);
 
-      if(stats != null)
+      if (stats != null)
       {
         stats.update(1, matchingEntries);
       }
@@ -259,13 +262,13 @@
    */
   void enableFilterUseStats(boolean enabled)
   {
-    if(enabled && !filterUseEnabled)
+    if (enabled && !filterUseEnabled)
     {
       startTimeStamp = TimeThread.getGMTTime();
       indexedSearchCount.set(0);
       unindexedSearchCount.set(0);
     }
-    else if(!enabled)
+    else if (!enabled)
     {
       filterToStats.clear();
     }
@@ -314,16 +317,15 @@
 
   private void removeLowestHit()
   {
-    while(!filterToStats.isEmpty() && filterToStats.size() > maxEntries)
+    while (!filterToStats.isEmpty() && filterToStats.size() > maxEntries)
     {
-      Iterator<Map.Entry<SearchFilter, FilterStats>> i =
-          filterToStats.entrySet().iterator();
-      Map.Entry<SearchFilter, FilterStats> lowest = i.next();
+      Iterator<Map.Entry<SearchFilter, FilterStats>> it = filterToStats.entrySet().iterator();
+      Map.Entry<SearchFilter, FilterStats> lowest = it.next();
       Map.Entry<SearchFilter, FilterStats> entry;
-      while(lowest.getValue().hits.get() > 1 && i.hasNext())
+      while (lowest.getValue().hits.get() > 1 && it.hasNext())
       {
-        entry = i.next();
-        if(entry.getValue().hits.get() < lowest.getValue().hits.get())
+        entry = it.next();
+        if (entry.getValue().hits.get() < lowest.getValue().hits.get())
         {
           lowest = entry;
         }
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java
index 908e275..16e6ddc 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexQueryFactoryImpl.java
@@ -46,7 +46,7 @@
 
 /**
  * This class is an implementation of IndexQueryFactory which creates
- * IndexQuery objects as part of the query of the JEB index.
+ * IndexQuery objects as part of the query to the index.
  */
 final class IndexQueryFactoryImpl implements IndexQueryFactory<IndexQuery>
 {
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Importer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Importer.java
index d1ba206..bb45035 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Importer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Importer.java
@@ -32,6 +32,11 @@
 
 /**
  * Allows to run an import. For performance reasons, imports are run without transactions.
+ * <p>
+ * Since import is multi-threaded, implementations must be thread-safe.
+ *
+ * @ThreadSafe
  */
 public interface Importer extends Closeable
 {
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Storage.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Storage.java
index 882f5bb..b63fab8 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Storage.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/spi/Storage.java
@@ -111,10 +111,6 @@
    */
   boolean supportsBackupAndRestore();
 
-  /** {@inheritDoc} */
-  @Override
-  void close();
-
   /**
    * Creates a backup for this storage.
    *
@@ -149,9 +145,12 @@
   void restoreBackup(RestoreConfig restoreConfig) throws DirectoryException;
 
   /**
-   * TODO JNR.
+   * Lists the trees that exist in this storage.
    *
-   * @return TODO JNR
+   * @return a set of {@link TreeName}s representing the trees that exist in this storage
    */
   Set<TreeName> listTrees();
+
+  @Override
+  void close();
 }
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/tasks/RebuildTask.java b/opendj-server-legacy/src/main/java/org/opends/server/tasks/RebuildTask.java
index 26a3ee9..2413e34 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/tasks/RebuildTask.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/tasks/RebuildTask.java
@@ -59,7 +59,7 @@
 
 /**
  * This class provides an implementation of a Directory Server task that can be
- * used to rebuild indexes in the JEB backend..
+ * used to rebuild indexes in a backend.
  */
 public class RebuildTask extends Task
 {
diff --git a/opendj-server-legacy/src/test/java/org/opends/server/authorization/dseecompat/ReferencesTestCase.java b/opendj-server-legacy/src/test/java/org/opends/server/authorization/dseecompat/ReferencesTestCase.java
index 396d353..59392a0 100644
--- a/opendj-server-legacy/src/test/java/org/opends/server/authorization/dseecompat/ReferencesTestCase.java
+++ b/opendj-server-legacy/src/test/java/org/opends/server/authorization/dseecompat/ReferencesTestCase.java
@@ -37,12 +37,10 @@
 import java.io.BufferedReader;
 import java.io.IOException;
 
-
 /**
  * Unit test to test ACI behavior and Named Subordinate References (RFC 3296).
- * This test needs a jeb backend, the memory backend cannot be used.
+ * This test needs a pluggable backend, the memory backend cannot be used.
  */
-
 public class ReferencesTestCase extends AciTestCase{
   private static String suffix="dc=example,dc=com";
   private static final String level5User="uid=user.5,ou=People," + suffix;
diff --git a/opendj-server-legacy/src/test/java/org/opends/server/backends/jeb/JebTestCase.java b/opendj-server-legacy/src/test/java/org/opends/server/backends/jeb/JebTestCase.java
index f17599b..62bd449 100644
--- a/opendj-server-legacy/src/test/java/org/opends/server/backends/jeb/JebTestCase.java
+++ b/opendj-server-legacy/src/test/java/org/opends/server/backends/jeb/JebTestCase.java
@@ -26,31 +26,29 @@
  */
 package org.opends.server.backends.jeb;
 
-import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.*;
 
 import java.io.File;
 import java.util.ArrayList;
 import java.util.Random;
 import java.util.TreeMap;
 
+import org.forgerock.i18n.LocalizableMessage;
+import org.forgerock.opendj.ldap.ResultCode;
+import org.opends.server.DirectoryServerTestCase;
+import org.opends.server.TestCaseUtils;
 import org.opends.server.core.AddOperation;
 import org.opends.server.core.DeleteOperation;
 import org.opends.server.protocols.internal.InternalClientConnection;
 import org.opends.server.tools.makeldif.MakeLDIFInputStream;
 import org.opends.server.tools.makeldif.TemplateFile;
 import org.opends.server.types.DN;
-import org.opends.server.types.LDIFImportConfig;
-import org.forgerock.opendj.ldap.ResultCode;
-import org.opends.server.DirectoryServerTestCase;
-import org.opends.server.TestCaseUtils;
-import org.forgerock.i18n.LocalizableMessage;
 import org.opends.server.types.Entry;
+import org.opends.server.types.LDIFImportConfig;
 import org.opends.server.util.LDIFReader;
 import org.testng.annotations.Test;
 
-/**
- * An abstract base class for all Jeb backend test cases.
- */
+/** An abstract base class for all JE backend test cases. */
 @Test(groups = { "precommit", "jeb" }, sequential = true)
 public abstract class JebTestCase extends DirectoryServerTestCase {
     private TreeMap<DN,Entry> entryTreeMap = new TreeMap<>();
diff --git a/opendj-server-legacy/src/test/java/org/opends/server/backends/pluggable/DN2IDTest.java b/opendj-server-legacy/src/test/java/org/opends/server/backends/pluggable/DN2IDTest.java
index 9e03fc6..e69964d 100644
--- a/opendj-server-legacy/src/test/java/org/opends/server/backends/pluggable/DN2IDTest.java
+++ b/opendj-server-legacy/src/test/java/org/opends/server/backends/pluggable/DN2IDTest.java
@@ -27,7 +27,7 @@
 
 import static org.assertj.core.api.Assertions.*;
 import static org.mockito.Mockito.*;
-import static org.opends.server.ConfigurationMock.legacyMockCfg;
+import static org.opends.server.ConfigurationMock.*;
 import static org.opends.server.util.CollectionUtils.*;
 
 import java.util.ArrayList;
@@ -46,7 +46,6 @@
 import org.opends.server.backends.pdb.PDBStorage;
 import org.opends.server.backends.pluggable.spi.AccessMode;
 import org.opends.server.backends.pluggable.spi.Cursor;
-import org.opends.server.backends.pluggable.spi.Importer;
 import org.opends.server.backends.pluggable.spi.ReadOperation;
 import org.opends.server.backends.pluggable.spi.ReadableTransaction;
 import org.opends.server.backends.pluggable.spi.SequentialCursor;

--
Gitblit v1.10.0