mirror of https://github.com/OpenIdentityPlatform/OpenDJ.git

Violette Roche-Montane
07.27.2013 010eb1a3897f43458830cef58270d8196c3a4459
OPENDJ-473 CR-1252 Implement a task which allows the administrator to forcefully clear an index's degraded status.
6 files modified
2054 lines changed across 6 files
opends/resource/schema/02-config.ldif 7 ●●●● patch | view | raw | blame | history
opends/src/messages/messages/tools.properties 8 ●●●● patch | view | raw | blame | history
opends/src/server/org/opends/server/backends/jeb/RebuildConfig.java 56 ●●●● patch | view | raw | blame | history
opends/src/server/org/opends/server/backends/jeb/importLDIF/Importer.java 1954 ●●●●● patch | view | raw | blame | history
opends/src/server/org/opends/server/config/ConfigConstants.java 9 ●●●● patch | view | raw | blame | history
opends/src/server/org/opends/server/tools/RebuildIndex.java 20 ●●●●● patch | view | raw | blame | history
opends/resource/schema/02-config.ldif
@@ -3654,6 +3654,11 @@
  SYNTAX 1.3.6.1.4.1.1466.115.121.1.27
  SINGLE-VALUE
  X-ORIGIN 'OpenDJ Directory Server' )
attributeTypes: ( 1.3.6.1.4.1.36733.2.1.1.116
  NAME 'ds-task-rebuild-index-clear-degraded-state'
  EQUALITY caseIgnoreMatch
  SYNTAX 1.3.6.1.4.1.1466.115.121.1.15
  X-ORIGIN 'OpenDS Directory Server' )
objectClasses: ( 1.3.6.1.4.1.26027.1.2.1
  NAME 'ds-cfg-access-control-handler'
  SUP top
@@ -4638,7 +4643,7 @@
  STRUCTURAL
  MUST ( ds-task-rebuild-base-dn $
         ds-task-rebuild-index )
  MAY ds-task-rebuild-tmp-directory
  MAY ( ds-task-rebuild-tmp-directory $ ds-task-rebuild-index-clear-degraded-state )
  X-ORIGIN 'OpenDS Directory Server' )
objectClasses: ( 1.3.6.1.4.1.26027.1.2.98
  NAME 'ds-virtual-static-group'
opends/src/messages/messages/tools.properties
@@ -21,7 +21,7 @@
# CDDL HEADER END
#
#      Copyright 2006-2010 Sun Microsystems, Inc.
#      Portions Copyright 2011-2012 ForgeRock AS
#      Portions Copyright 2011-2013 ForgeRock AS
@@ -2595,3 +2595,9 @@
 cannot be specified with the "--rebuildDegraded" option
SEVERE_ERR_CONFIGDS_CANNOT_UPDATE_DIGEST_MD5_FQDN_1733=An error occurred while \
 attempting to update the FQDN for the DIGEST-MD5 SASL mechanism:  %s
INFO_REBUILDINDEX_DESCRIPTION_CLEAR_DEGRADED_STATE_1734=Indicates that indexes do not need \
rebuilding because they are known to be empty and forcefully marks them as valid. \
This is an advanced option which must only be used in cases where a degraded index \
is known to be empty and does not therefore need rebuilding. \
This situation typically arises when an index is created for an attribute \
which has just been added to the schema
opends/src/server/org/opends/server/backends/jeb/RebuildConfig.java
@@ -23,7 +23,7 @@
 *
 *
 *      Copyright 2006-2009 Sun Microsystems, Inc.
 *      Portions copyright 2012 ForgeRock AS.
 *      Portions Copyright 2011-2013 ForgeRock AS
 */
package org.opends.server.backends.jeb;
@@ -39,7 +39,8 @@
  /**
   * Identifies how indexes will be selected for rebuild.
   */
  public static enum RebuildMode {
  public static enum RebuildMode
  {
    /**
     * Rebuild all indexes, including system indexes.
     */
@@ -70,8 +71,10 @@
  private String tmpDirectory;
  private boolean isClearDegradedState;
  /**
   * Create a new rebuild configuraiton.
   * Create a new rebuild configuration.
   */
  public RebuildConfig()
  {
@@ -80,6 +83,7 @@
  /**
   * Get the base DN to rebuild.
   *
   * @return The base DN to rebuild.
   */
  public DN getBaseDN()
@@ -89,7 +93,9 @@
  /**
   * Set the base DN to rebuild.
   * @param baseDN The base DN to rebuild.
   *
   * @param baseDN
   *          The base DN to rebuild.
   */
  public void setBaseDN(DN baseDN)
  {
@@ -109,11 +115,12 @@
  /**
   * Add an index to be rebuilt into the configuration. Duplicate index names
   * will be ignored. Adding an index that causes a mix of complete and partial
   * rebuild for the same attribute index in the configuration will remove
   * the partial and just keep the complete attribute index name.
   * (ie. uid and uid.presence).
   * rebuild for the same attribute index in the configuration will remove the
   * partial and just keep the complete attribute index name. (ie. uid and
   * uid.presence).
   *
   * @param index The index to add.
   * @param index
   *          The index to add.
   */
  public void addRebuildIndex(String index)
  {
@@ -150,7 +157,8 @@
   * Check the given config for conflicts with this config. A conflict is
   * detected if both configs specify the same indexType/database to be rebuilt.
   *
   * @param config The rebuild config to check against.
   * @param config
   *          The rebuild config to check against.
   * @return the name of the indexType causing the conflict or null if no
   *         conflict is detected.
   */
@@ -217,11 +225,11 @@
    return false;
  }
  /**
   * Set the temporary directory to the specified path.
   *
   * @param path The path to set the temporary directory to.
   * @param path
   *          The path to set the temporary directory to.
   */
  public void setTmpDirectory(String path)
  {
@@ -238,11 +246,11 @@
    return tmpDirectory;
  }
  /**
   * Sets the rebuild mode.
   *
   * @param mode The new rebuild mode.
   * @param mode
   *          The new rebuild mode.
   */
  public void setRebuildMode(RebuildMode mode)
  {
@@ -259,5 +267,27 @@
    return rebuildMode;
  }
  /**
   * Returns {@code true} if indexes should be forcefully marked as valid even
   * if they are currently degraded.
   *
   * @return {@code true} if index should be forcefully marked as valid.
   */
  public boolean isClearDegradedState()
  {
    return isClearDegradedState;
  }
  /**
   * Sets the 'clear degraded index' status.
   *
   * @param isClearDegradedState
   *          {@code true} if indexes should be forcefully marked as valid even
   *          if they are currently degraded.
   */
  public void isClearDegradedState(boolean isClearDegradedState)
  {
    this.isClearDegradedState = isClearDegradedState;
  }
}
opends/src/server/org/opends/server/backends/jeb/importLDIF/Importer.java
@@ -52,6 +52,7 @@
import org.opends.server.admin.std.server.LocalDBIndexCfg;
import org.opends.server.api.DiskSpaceMonitorHandler;
import org.opends.server.backends.jeb.*;
import org.opends.server.backends.jeb.RebuildConfig.RebuildMode;
import org.opends.server.config.ConfigException;
import org.opends.server.core.DirectoryServer;
import org.opends.server.extensions.DiskSpaceMonitor;
@@ -64,7 +65,6 @@
import com.sleepycat.je.*;
import com.sleepycat.util.PackedInteger;
/**
 * This class provides the engine that performs both importing of LDIF files and
 * the rebuilding of indexes.
@@ -90,8 +90,8 @@
  //Defaults for LDIF reader buffers, min memory required to import and default
  //size for byte buffers.
  private static final int READER_WRITER_BUFFER_SIZE = 8 * KB;
  private static final int MIN_DB_CACHE_MEMORY = MAX_DB_CACHE_SIZE +
                                                 MAX_DB_LOG_SIZE;
  private static final int MIN_DB_CACHE_MEMORY = MAX_DB_CACHE_SIZE
      + MAX_DB_LOG_SIZE;
  private static final int BYTE_BUFFER_CAPACITY = 128;
  //Min and MAX sizes of phase one buffer.
@@ -165,8 +165,7 @@
  //Map of index keys to index buffers.  Used to allocate sorted
  //index buffers to a index writer thread.
  private final
  Map<IndexKey, BlockingQueue<IndexOutputBuffer>> indexKeyQueMap =
  private final Map<IndexKey, BlockingQueue<IndexOutputBuffer>> indexKeyQueMap =
          new ConcurrentHashMap<IndexKey, BlockingQueue<IndexOutputBuffer>>();
  //Map of DB containers to index managers. Used to start phase 2.
@@ -185,7 +184,6 @@
  //the index file writer tasks when the LDIF file has been done.
  private final List<ScratchFileWriterTask> scratchFileWriterList;
  //Map of DNs to Suffix objects.
  private final Map<DN, Suffix> dnSuffixMap = new LinkedHashMap<DN, Suffix>();
@@ -214,7 +212,6 @@
  //Number of phase one buffers
  private int phaseOneBufferCount;
  static
  {
    if ((dnType = DirectoryServer.getAttributeType("dn")) == null)
@@ -223,8 +220,6 @@
    }
  }
  /**
   * Create a new import job with the specified rebuild index config.
   *
@@ -251,8 +246,8 @@
    this.threadCount = 1;
    this.rebuildManager = new RebuildIndexManager(rebuildConfig, cfg);
    this.indexCount = rebuildManager.getIndexCount();
    this.scratchFileWriterList = new ArrayList<ScratchFileWriterTask>(
        indexCount);
    this.scratchFileWriterList =
        new ArrayList<ScratchFileWriterTask>(indexCount);
    this.scratchFileWriterFutures = new CopyOnWriteArrayList<Future<?>>();
    File parentDir;
@@ -269,16 +264,14 @@
    recursiveDelete(tempDir);
    if (!tempDir.exists() && !tempDir.mkdirs())
    {
      Message message = ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get(String
          .valueOf(tempDir));
      Message message =
          ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get(String.valueOf(tempDir));
      throw new InitializationException(message);
    }
    this.skipDNValidation = true;
    initializeDBEnv(envConfig);
  }
  /**
   * Create a new import job with the specified ldif import config.
   *
@@ -323,8 +316,8 @@
        this.clearedBackend = true;
      }
    }
    this.scratchFileWriterList = new ArrayList<ScratchFileWriterTask>(
        indexCount);
    this.scratchFileWriterList =
        new ArrayList<ScratchFileWriterTask>(indexCount);
    this.scratchFileWriterFutures = new CopyOnWriteArrayList<Future<?>>();
    File parentDir;
    if (importConfiguration.getTmpDirectory() == null)
@@ -339,8 +332,8 @@
    recursiveDelete(tempDir);
    if (!tempDir.exists() && !tempDir.mkdirs())
    {
      Message message = ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get(String
          .valueOf(tempDir));
      Message message =
          ERR_JEB_IMPORT_CREATE_TMPDIR_ERROR.get(String.valueOf(tempDir));
      throw new InitializationException(message);
    }
    skipDNValidation = importConfiguration.getSkipDNValidation();
@@ -359,8 +352,6 @@
    }
  }
  private int getTotalIndexCount(LocalDBBackendCfg localDBBackendCfg)
      throws ConfigException
  {
@@ -371,8 +362,8 @@
      SortedSet<IndexType> types = index.getIndexType();
      if (types.contains(IndexType.EXTENSIBLE))
      {
        indexes += types.size() - 1
            + index.getIndexExtensibleMatchingRule().size();
        indexes +=
            types.size() - 1 + index.getIndexExtensibleMatchingRule().size();
      }
      else
      {
@@ -382,14 +373,14 @@
    return indexes;
  }
  /**
   * Return the suffix instance in the specified map that matches the specified
   * DN.
   *
   * @param dn The DN to search for.
   * @param map The map to search.
   * @param dn
   *          The DN to search for.
   * @param map
   *          The map to search.
   * @return The suffix instance that matches the DN, or null if no match is
   *         found.
   */
@@ -398,7 +389,8 @@
    Suffix suffix = null;
    DN nodeDN = dn;
    while (suffix == null && nodeDN != null) {
    while (suffix == null && nodeDN != null)
    {
      suffix = map.get(nodeDN);
      if (suffix == null)
      {
@@ -408,8 +400,6 @@
    return suffix;
  }
  /**
   * Calculate buffer sizes and initialize JEB properties based on memory.
   *
@@ -426,8 +416,8 @@
    // scratch writers (1 per index).
    calculateAvailableMemory();
    final long usableMemory = availableMemory
        - (indexCount * READER_WRITER_BUFFER_SIZE);
    final long usableMemory =
        availableMemory - (indexCount * READER_WRITER_BUFFER_SIZE);
    // We need caching when doing DN validation or rebuilding indexes.
    if (!skipDNValidation || (rebuildManager != null))
@@ -480,16 +470,16 @@
      }
    }
    final long phaseOneBufferMemory = usableMemory - dbCacheSize
        - tmpEnvCacheSize;
    final long phaseOneBufferMemory =
        usableMemory - dbCacheSize - tmpEnvCacheSize;
    final int oldThreadCount = threadCount;
    while (true)
    {
      phaseOneBufferCount = 2 * indexCount * threadCount;
      // Scratch writers allocate 4 buffers per index as well.
      final int totalPhaseOneBufferCount = phaseOneBufferCount
          + (4 * indexCount);
      final int totalPhaseOneBufferCount =
          phaseOneBufferCount + (4 * indexCount);
      bufferSize = (int) (phaseOneBufferMemory / totalPhaseOneBufferCount);
      if (bufferSize > MAX_BUFFER_SIZE)
@@ -500,8 +490,8 @@
          // temp DB.
          bufferSize = MAX_BUFFER_SIZE;
          final long extraMemory = phaseOneBufferMemory
              - (totalPhaseOneBufferCount * bufferSize);
          final long extraMemory =
              phaseOneBufferMemory - (totalPhaseOneBufferCount * bufferSize);
          if (!clearedBackend)
          {
            dbCacheSize += extraMemory / 2;
@@ -528,9 +518,10 @@
      else
      {
        // Not enough memory.
        final long minimumPhaseOneBufferMemory = totalPhaseOneBufferCount
            * MIN_BUFFER_SIZE;
        Message message = ERR_IMPORT_LDIF_LACK_MEM.get(usableMemory,
        final long minimumPhaseOneBufferMemory =
            totalPhaseOneBufferCount * MIN_BUFFER_SIZE;
        Message message =
            ERR_IMPORT_LDIF_LACK_MEM.get(usableMemory,
            minimumPhaseOneBufferMemory + dbCacheSize + tmpEnvCacheSize);
        throw new InitializationException(message);
      }
@@ -538,13 +529,14 @@
    if (oldThreadCount != threadCount)
    {
      Message message = NOTE_JEB_IMPORT_ADJUST_THREAD_COUNT.get(oldThreadCount,
          threadCount);
      Message message =
          NOTE_JEB_IMPORT_ADJUST_THREAD_COUNT.get(oldThreadCount, threadCount);
      logError(message);
    }
    Message message = NOTE_JEB_IMPORT_LDIF_TOT_MEM_BUF.get(
        availableMemory, phaseOneBufferCount);
    Message message =
        NOTE_JEB_IMPORT_LDIF_TOT_MEM_BUF.get(availableMemory,
            phaseOneBufferCount);
    logError(message);
    if (tmpEnvCacheSize > 0)
    {
@@ -557,8 +549,6 @@
    logError(message);
  }
  /**
   * Calculates the amount of available memory which can be used by this import,
   * taking into account whether or not the import is running offline or online
@@ -584,13 +574,14 @@
      }
      else
      {
        configuredMemory = backendConfiguration.getDBCachePercent()
        configuredMemory =
            backendConfiguration.getDBCachePercent()
            * Runtime.getRuntime().maxMemory() / 100;
      }
      // Round up to minimum of 16MB (e.g. unit tests only use 2% cache).
      totalAvailableMemory = Math.max(Math.min(usableMemory, configuredMemory),
          16 * MB);
      totalAvailableMemory =
          Math.max(Math.min(usableMemory, configuredMemory), 16 * MB);
    }
    else
    {
@@ -614,7 +605,6 @@
    availableMemory = (totalAvailableMemory * importMemPct / 100);
  }
  private void initializeIndexBuffers()
  {
    for(int i = 0; i < phaseOneBufferCount; i++)
@@ -624,9 +614,8 @@
    }
  }
  private void initializeSuffixes() throws DatabaseException,
           ConfigException, InitializationException
  private void initializeSuffixes() throws DatabaseException, ConfigException,
      InitializationException
  {
    for(EntryContainer ec : rootContainer.getEntryContainers())
    {
@@ -639,43 +628,51 @@
    }
  }
  //Mainly used to support multiple suffixes. Each index in each suffix gets
  //an unique ID to identify which DB it needs to go to in phase two processing.
  private void generateIndexID(Suffix suffix)
  {
    for(Map.Entry<AttributeType, AttributeIndex> mapEntry :
            suffix.getAttrIndexMap().entrySet()) {
    for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix
        .getAttrIndexMap().entrySet())
    {
      AttributeIndex attributeIndex = mapEntry.getValue();
      DatabaseContainer container;
      if((container=attributeIndex.getEqualityIndex()) != null) {
      if ((container = attributeIndex.getEqualityIndex()) != null)
      {
        int id = System.identityHashCode(container);
        idContainerMap.putIfAbsent(id, container);
      }
      if((container=attributeIndex.getPresenceIndex()) != null) {
      if ((container = attributeIndex.getPresenceIndex()) != null)
      {
        int id = System.identityHashCode(container);
        idContainerMap.putIfAbsent(id, container);
      }
      if((container=attributeIndex.getSubstringIndex()) != null) {
      if ((container = attributeIndex.getSubstringIndex()) != null)
      {
        int id = System.identityHashCode(container);
        idContainerMap.putIfAbsent(id, container);
      }
      if((container=attributeIndex.getOrderingIndex()) != null) {
      if ((container = attributeIndex.getOrderingIndex()) != null)
      {
        int id = System.identityHashCode(container);
        idContainerMap.putIfAbsent(id, container);
      }
      if((container=attributeIndex.getApproximateIndex()) != null) {
      if ((container = attributeIndex.getApproximateIndex()) != null)
      {
        int id = System.identityHashCode(container);
        idContainerMap.putIfAbsent(id, container);
      }
      Map<String,Collection<Index>> extensibleMap =
              attributeIndex.getExtensibleIndexes();
      if(!extensibleMap.isEmpty()) {
      if (!extensibleMap.isEmpty())
      {
        Collection<Index> subIndexes =
                attributeIndex.getExtensibleIndexes().get(
                        EXTENSIBLE_INDEXER_ID_SUBSTRING);
        if(subIndexes != null) {
          for(DatabaseContainer subIndex : subIndexes) {
        if (subIndexes != null)
        {
          for (DatabaseContainer subIndex : subIndexes)
          {
            int id = System.identityHashCode(subIndex);
            idContainerMap.putIfAbsent(id, subIndex);
          }
@@ -683,8 +680,10 @@
        Collection<Index> sharedIndexes =
                attributeIndex.getExtensibleIndexes().get(
                        EXTENSIBLE_INDEXER_ID_SHARED);
        if(sharedIndexes !=null) {
          for(DatabaseContainer sharedIndex : sharedIndexes) {
        if (sharedIndexes != null)
        {
          for (DatabaseContainer sharedIndex : sharedIndexes)
          {
            int id = System.identityHashCode(sharedIndex);
            idContainerMap.putIfAbsent(id, sharedIndex);
          }
@@ -693,16 +692,16 @@
    }
  }
  private Suffix getSuffix(EntryContainer entryContainer)
     throws ConfigException, InitializationException {
      throws ConfigException, InitializationException
  {
   DN baseDN = entryContainer.getBaseDN();
   EntryContainer sourceEntryContainer = null;
   List<DN> includeBranches = new ArrayList<DN>();
   List<DN> excludeBranches = new ArrayList<DN>();
   if(!importConfiguration.appendToExistingData() &&
       !importConfiguration.clearBackend())
    if (!importConfiguration.appendToExistingData()
        && !importConfiguration.clearBackend())
   {
     for(DN dn : importConfiguration.getExcludeBranches())
     {
@@ -730,8 +729,8 @@
       if(includeBranches.isEmpty())
       {
           /*
            There are no branches in the explicitly defined include list under
            this base DN. Skip this base DN all together.
           * There are no branches in the explicitly defined include list under
           * this base DN. Skip this base DN all together.
           */
           return null;
@@ -779,9 +778,8 @@
         }
       }
       if((includeBranches.size() == 1) &&
           excludeBranches.isEmpty() &&
           includeBranches.get(0).equals(baseDN))
        if ((includeBranches.size() == 1) && excludeBranches.isEmpty()
            && includeBranches.get(0).equals(baseDN))
       {
         // This entire base DN is explicitly included in the import with
         // no exclude branches that we need to migrate. Just clear the entry
@@ -795,9 +793,9 @@
         // Create a temp entry container
         sourceEntryContainer = entryContainer;
         entryContainer =
             rootContainer.openEntryContainer(baseDN,
                                              baseDN.toNormalizedString() +
                                                  "_importTmp");
              rootContainer.openEntryContainer(baseDN, baseDN
                  .toNormalizedString()
                  + "_importTmp");
       }
     }
   }
@@ -805,44 +803,45 @@
                                     includeBranches, excludeBranches);
 }
  /**
   * Rebuild the indexes using the specified rootcontainer.
   *
   * @param rootContainer The rootcontainer to rebuild indexes in.
   *
   * @throws ConfigException If a configuration error occurred.
   * @throws InitializationException If an initialization error occurred.
   * @throws JebException If the JEB database had an error.
   * @throws InterruptedException If an interrupted error occurred.
   * @throws ExecutionException If an execution error occurred.
   * @param rootContainer
   *          The rootcontainer to rebuild indexes in.
   * @throws ConfigException
   *           If a configuration error occurred.
   * @throws InitializationException
   *           If an initialization error occurred.
   * @throws JebException
   *           If the JEB database had an error.
   * @throws InterruptedException
   *           If an interrupted error occurred.
   * @throws ExecutionException
   *           If an execution error occurred.
   */
  public void
  rebuildIndexes(RootContainer rootContainer) throws ConfigException,
          InitializationException, JebException,
  public void rebuildIndexes(RootContainer rootContainer)
      throws ConfigException, InitializationException, JebException,
          InterruptedException, ExecutionException
  {
    this.rootContainer = rootContainer;
    long startTime = System.currentTimeMillis();
    DiskSpaceMonitor tmpMonitor = new DiskSpaceMonitor(
        backendConfiguration.getBackendId() +
            " backend index rebuild tmp directory",
        tempDir, backendConfiguration.getDiskLowThreshold(),
        backendConfiguration.getDiskFullThreshold(), 5,
        TimeUnit.SECONDS, this);
    DiskSpaceMonitor tmpMonitor =
        new DiskSpaceMonitor(backendConfiguration.getBackendId()
            + " backend index rebuild tmp directory", tempDir,
            backendConfiguration.getDiskLowThreshold(), backendConfiguration
                .getDiskFullThreshold(), 5, TimeUnit.SECONDS, this);
    tmpMonitor.initializeMonitorProvider(null);
    DirectoryServer.registerMonitorProvider(tmpMonitor);
    File parentDirectory =
        getFileForPath(backendConfiguration.getDBDirectory());
    File backendDirectory =
        new File(parentDirectory, backendConfiguration.getBackendId());
    DiskSpaceMonitor dbMonitor = new DiskSpaceMonitor(
        backendConfiguration.getBackendId() +
            " backend index rebuild DB directory",
        backendDirectory, backendConfiguration.getDiskLowThreshold(),
        backendConfiguration.getDiskFullThreshold(), 5,
        TimeUnit.SECONDS, this);
    DiskSpaceMonitor dbMonitor =
        new DiskSpaceMonitor(backendConfiguration.getBackendId()
            + " backend index rebuild DB directory", backendDirectory,
            backendConfiguration.getDiskLowThreshold(), backendConfiguration
                .getDiskFullThreshold(), 5, TimeUnit.SECONDS, this);
    dbMonitor.initializeMonitorProvider(null);
    DirectoryServer.registerMonitorProvider(dbMonitor);
@@ -856,40 +855,41 @@
    }
    finally
    {
      DirectoryServer.deregisterMonitorProvider(
          tmpMonitor.getMonitorInstanceName());
      DirectoryServer.deregisterMonitorProvider(
          dbMonitor.getMonitorInstanceName());
      DirectoryServer.deregisterMonitorProvider(tmpMonitor
          .getMonitorInstanceName());
      DirectoryServer.deregisterMonitorProvider(dbMonitor
          .getMonitorInstanceName());
      tmpMonitor.finalizeMonitorProvider();
      dbMonitor.finalizeMonitorProvider();
    }
  }
  /**
   * Import a LDIF using the specified root container.
   *
   * @param rootContainer The root container to use during the import.
   *
   * @param rootContainer
   *          The root container to use during the import.
   * @return A LDIF result.
   * @throws ConfigException If the import failed because of an configuration
   *                         error.
   * @throws InitializationException If the import failed because of an
   *               initialization error.
   * @throws JebException If the import failed due to a database error.
   * @throws InterruptedException If the import failed due to an interrupted
   *                              error.
   * @throws ExecutionException If the import failed due to an execution error.
   * @throws ConfigException
   *           If the import failed because of an configuration error.
   * @throws InitializationException
   *           If the import failed because of an initialization error.
   * @throws JebException
   *           If the import failed due to a database error.
   * @throws InterruptedException
   *           If the import failed due to an interrupted error.
   * @throws ExecutionException
   *           If the import failed due to an execution error.
   */
  public LDIFImportResult
  processImport(RootContainer rootContainer) throws ConfigException,
          InitializationException, JebException,
  public LDIFImportResult processImport(RootContainer rootContainer)
      throws ConfigException, InitializationException, JebException,
          InterruptedException, ExecutionException
  {
    this.rootContainer = rootContainer;
    try
    {
      reader = new LDIFReader(importConfiguration, rootContainer,
      reader =
          new LDIFReader(importConfiguration, rootContainer,
          READER_WRITER_BUFFER_SIZE);
    }
    catch (IOException ioe)
@@ -898,29 +898,31 @@
      throw new InitializationException(message, ioe);
    }
    DiskSpaceMonitor tmpMonitor = new DiskSpaceMonitor(
        backendConfiguration.getBackendId() + " backend import tmp directory",
        tempDir, backendConfiguration.getDiskLowThreshold(),
        backendConfiguration.getDiskFullThreshold(), 5,
        TimeUnit.SECONDS, this);
    DiskSpaceMonitor tmpMonitor =
        new DiskSpaceMonitor(backendConfiguration.getBackendId()
            + " backend import tmp directory", tempDir, backendConfiguration
            .getDiskLowThreshold(),
            backendConfiguration.getDiskFullThreshold(), 5, TimeUnit.SECONDS,
            this);
    tmpMonitor.initializeMonitorProvider(null);
    DirectoryServer.registerMonitorProvider(tmpMonitor);
    File parentDirectory =
        getFileForPath(backendConfiguration.getDBDirectory());
    File backendDirectory =
        new File(parentDirectory, backendConfiguration.getBackendId());
    DiskSpaceMonitor dbMonitor = new DiskSpaceMonitor(
        backendConfiguration.getBackendId() + " backend import DB directory",
        backendDirectory, backendConfiguration.getDiskLowThreshold(),
        backendConfiguration.getDiskFullThreshold(), 5,
        TimeUnit.SECONDS, this);
    DiskSpaceMonitor dbMonitor =
        new DiskSpaceMonitor(backendConfiguration.getBackendId()
            + " backend import DB directory", backendDirectory,
            backendConfiguration.getDiskLowThreshold(), backendConfiguration
                .getDiskFullThreshold(), 5, TimeUnit.SECONDS, this);
    dbMonitor.initializeMonitorProvider(null);
    DirectoryServer.registerMonitorProvider(dbMonitor);
    try
    {
      Message message = NOTE_JEB_IMPORT_STARTING.get(
          DirectoryServer.getVersionString(), BUILD_ID, REVISION_NUMBER);
      Message message =
          NOTE_JEB_IMPORT_STARTING.get(DirectoryServer.getVersionString(),
              BUILD_ID, REVISION_NUMBER);
      logError(message);
      message = NOTE_JEB_IMPORT_THREAD_COUNT.get(threadCount);
      logError(message);
@@ -951,31 +953,33 @@
      long finishTime = System.currentTimeMillis();
      long importTime = (finishTime - startTime);
      float rate = 0;
      message = NOTE_JEB_IMPORT_PHASE_STATS.get(importTime / 1000,
      message =
          NOTE_JEB_IMPORT_PHASE_STATS.get(importTime / 1000,
          (phaseOneFinishTime - startTime) / 1000,
          (phaseTwoFinishTime - phaseTwoTime) / 1000);
      logError(message);
      if (importTime > 0) rate = 1000f * reader.getEntriesRead() / importTime;
      message = NOTE_JEB_IMPORT_FINAL_STATUS.get(reader.getEntriesRead(),
          importCount.get(), reader.getEntriesIgnored(),
          reader.getEntriesRejected(), migratedCount, importTime / 1000, rate);
      if (importTime > 0)
        rate = 1000f * reader.getEntriesRead() / importTime;
      message =
          NOTE_JEB_IMPORT_FINAL_STATUS.get(reader.getEntriesRead(), importCount
              .get(), reader.getEntriesIgnored(), reader.getEntriesRejected(),
              migratedCount, importTime / 1000, rate);
      logError(message);
    }
    finally
    {
      reader.close();
      DirectoryServer.deregisterMonitorProvider(
          tmpMonitor.getMonitorInstanceName());
      DirectoryServer.deregisterMonitorProvider(
          dbMonitor.getMonitorInstanceName());
      DirectoryServer.deregisterMonitorProvider(tmpMonitor
          .getMonitorInstanceName());
      DirectoryServer.deregisterMonitorProvider(dbMonitor
          .getMonitorInstanceName());
      tmpMonitor.finalizeMonitorProvider();
      dbMonitor.finalizeMonitorProvider();
    }
    return new LDIFImportResult(reader.getEntriesRead(),
        reader.getEntriesRejected(), reader.getEntriesIgnored());
    return new LDIFImportResult(reader.getEntriesRead(), reader
        .getEntriesRejected(), reader.getEntriesIgnored());
  }
  private void recursiveDelete(File dir)
  {
    if(dir.listFiles() != null)
@@ -992,16 +996,16 @@
    dir.delete();
  }
  private void switchContainers()
    throws DatabaseException, JebException, InitializationException
  private void switchContainers() throws DatabaseException, JebException,
      InitializationException
  {
     for(Suffix suffix : dnSuffixMap.values()) {
    for (Suffix suffix : dnSuffixMap.values())
    {
       DN baseDN = suffix.getBaseDN();
       EntryContainer entryContainer =
               suffix.getSrcEntryContainer();
       if(entryContainer != null) {
      EntryContainer entryContainer = suffix.getSrcEntryContainer();
      if (entryContainer != null)
      {
         EntryContainer needRegisterContainer =
                 rootContainer.unregisterEntryContainer(baseDN);
@@ -1018,12 +1022,12 @@
     }
   }
  private void setIndexesTrusted(boolean trusted) throws JebException
  {
    try {
      for(Suffix s : dnSuffixMap.values()) {
    try
    {
      for (Suffix s : dnSuffixMap.values())
      {
        s.setIndexesTrusted(trusted);
      }
    }
@@ -1035,14 +1039,12 @@
    }
  }
  private void phaseOne() throws InterruptedException, ExecutionException
  {
    initializeIndexBuffers();
    FirstPhaseProgressTask progressTask = new FirstPhaseProgressTask();
    ScheduledThreadPoolExecutor timerService = new ScheduledThreadPoolExecutor(
        1);
    ScheduledThreadPoolExecutor timerService =
        new ScheduledThreadPoolExecutor(1);
    timerService.scheduleAtFixedRate(progressTask, TIMER_INTERVAL,
        TIMER_INTERVAL, TimeUnit.MILLISECONDS);
    scratchFileWriterService = Executors.newFixedThreadPool(2 * indexCount);
@@ -1120,14 +1122,12 @@
    freeBufferQueue.clear();
  }
  private void phaseTwo() throws InterruptedException, ExecutionException
  {
    SecondPhaseProgressTask progress2Task = new SecondPhaseProgressTask(
        reader.getEntriesRead());
    ScheduledThreadPoolExecutor timerService = new ScheduledThreadPoolExecutor(
        1);
    SecondPhaseProgressTask progress2Task =
        new SecondPhaseProgressTask(reader.getEntriesRead());
    ScheduledThreadPoolExecutor timerService =
        new ScheduledThreadPoolExecutor(1);
    timerService.scheduleAtFixedRate(progress2Task, TIMER_INTERVAL,
        TIMER_INTERVAL, TimeUnit.MILLISECONDS);
    try
@@ -1141,10 +1141,8 @@
    }
  }
  private void processIndexFiles()
      throws InterruptedException, ExecutionException
  private void processIndexFiles() throws InterruptedException,
      ExecutionException
  {
    if(bufferCount.get() == 0)
    {
@@ -1162,8 +1160,8 @@
    int buffers;
    while (true)
    {
      final List<IndexManager> totList = new ArrayList<IndexManager>(
          DNIndexMgrList);
      final List<IndexManager> totList =
          new ArrayList<IndexManager>(DNIndexMgrList);
      totList.addAll(indexMgrList);
      Collections.sort(totList, Collections.reverseOrder());
@@ -1207,8 +1205,9 @@
    // processing of smaller indexes.
    dbThreads = Math.max(2, dbThreads);
    Message message = NOTE_JEB_IMPORT_LDIF_PHASE_TWO_MEM_REPORT.get(
        availableMemory, readAheadSize, buffers);
    Message message =
        NOTE_JEB_IMPORT_LDIF_PHASE_TWO_MEM_REPORT.get(availableMemory,
            readAheadSize, buffers);
    logError(message);
    // Start indexing tasks.
@@ -1239,8 +1238,6 @@
    dbService.shutdown();
  }
  private void stopScratchFileWriters()
  {
    IndexOutputBuffer indexBuffer = new IndexOutputBuffer(0);
@@ -1250,7 +1247,6 @@
    }
  }
  /**
   * Task used to migrate excluded branch.
   */
@@ -1263,45 +1259,51 @@
    @Override
    public Void call() throws Exception
    {
      for(Suffix suffix : dnSuffixMap.values()) {
      for (Suffix suffix : dnSuffixMap.values())
      {
        EntryContainer entryContainer = suffix.getSrcEntryContainer();
        if(entryContainer != null &&
                !suffix.getExcludeBranches().isEmpty()) {
        if (entryContainer != null && !suffix.getExcludeBranches().isEmpty())
        {
          DatabaseEntry key = new DatabaseEntry();
          DatabaseEntry data = new DatabaseEntry();
          LockMode lockMode = LockMode.DEFAULT;
          OperationStatus status;
          Message message = NOTE_JEB_IMPORT_MIGRATION_START.get(
                  "excluded", String.valueOf(suffix.getBaseDN()));
          Message message =
              NOTE_JEB_IMPORT_MIGRATION_START.get("excluded", String
                  .valueOf(suffix.getBaseDN()));
          logError(message);
          Cursor cursor =
                  entryContainer.getDN2ID().openCursor(null,
                          CursorConfig.READ_COMMITTED);
          Comparator<byte[]> comparator =
                  entryContainer.getDN2ID().getComparator();
          try {
            for(DN excludedDN : suffix.getExcludeBranches()) {
              byte[] bytes = JebFormat.dnToDNKey(
                  excludedDN, suffix.getBaseDN().getNumComponents());
          try
          {
            for (DN excludedDN : suffix.getExcludeBranches())
            {
              byte[] bytes =
                  JebFormat.dnToDNKey(excludedDN, suffix.getBaseDN()
                      .getNumComponents());
              key.setData(bytes);
              status = cursor.getSearchKeyRange(key, data, lockMode);
              if(status == OperationStatus.SUCCESS &&
                      Arrays.equals(key.getData(), bytes)) {
              if (status == OperationStatus.SUCCESS
                  && Arrays.equals(key.getData(), bytes))
              {
                // This is the base entry for a branch that was excluded in the
                // import so we must migrate all entries in this branch over to
                // the new entry container.
                byte[] end = Arrays.copyOf(bytes, bytes.length+1);
                end[end.length-1] = 0x01;
                while(status == OperationStatus.SUCCESS &&
                      comparator.compare(key.getData(), end) < 0 &&
                      !importConfiguration.isCancelled() &&
                      !isCanceled) {
                while (status == OperationStatus.SUCCESS
                    && comparator.compare(key.getData(), end) < 0
                    && !importConfiguration.isCancelled() && !isCanceled)
                {
                  EntryID id = new EntryID(data);
                  Entry entry = entryContainer.getID2Entry().get(null,
                          id, LockMode.DEFAULT);
                  processEntry(entry, rootContainer.getNextEntryID(),
                          suffix);
                  Entry entry =
                      entryContainer.getID2Entry().get(null, id,
                          LockMode.DEFAULT);
                  processEntry(entry, rootContainer.getNextEntryID(), suffix);
                  migratedCount++;
                  status = cursor.getNext(key, data, lockMode);
                }
@@ -1312,7 +1314,8 @@
          catch (Exception e)
          {
            message =
              ERR_JEB_IMPORT_LDIF_MIGRATE_EXCLUDED_TASK_ERR.get(e.getMessage());
                ERR_JEB_IMPORT_LDIF_MIGRATE_EXCLUDED_TASK_ERR.get(e
                    .getMessage());
            logError(message);
            isCanceled =true;
            throw e;
@@ -1327,7 +1330,6 @@
    }
  }
  /**
   * Task to migrate existing entries.
   */
@@ -1340,35 +1342,37 @@
    @Override
    public Void call() throws Exception
    {
      for(Suffix suffix : dnSuffixMap.values()) {
      for (Suffix suffix : dnSuffixMap.values())
      {
        List<byte[]> includeBranches =
            new ArrayList<byte[]>(suffix.getIncludeBranches().size());
        for(DN includeBranch : suffix.getIncludeBranches())
        {
          if(includeBranch.isDescendantOf(suffix.getBaseDN()))
          {
            includeBranches.add(JebFormat.dnToDNKey(
                includeBranch, suffix.getBaseDN().getNumComponents()));
            includeBranches.add(JebFormat.dnToDNKey(includeBranch, suffix
                .getBaseDN().getNumComponents()));
          }
        }
        EntryContainer entryContainer = suffix.getSrcEntryContainer();
        if(entryContainer != null &&
                !suffix.getIncludeBranches().isEmpty()) {
        if (entryContainer != null && !suffix.getIncludeBranches().isEmpty())
        {
          DatabaseEntry key = new DatabaseEntry();
          DatabaseEntry data = new DatabaseEntry();
          LockMode lockMode = LockMode.DEFAULT;
          OperationStatus status;
          Message message = NOTE_JEB_IMPORT_MIGRATION_START.get(
                  "existing", String.valueOf(suffix.getBaseDN()));
          Message message =
              NOTE_JEB_IMPORT_MIGRATION_START.get("existing", String
                  .valueOf(suffix.getBaseDN()));
          logError(message);
          Cursor cursor =
                  entryContainer.getDN2ID().openCursor(null,
                          null);
          try {
          Cursor cursor = entryContainer.getDN2ID().openCursor(null, null);
          try
          {
            status = cursor.getFirst(key, data, lockMode);
            while(status == OperationStatus.SUCCESS &&
                    !importConfiguration.isCancelled() && !isCanceled) {
            while (status == OperationStatus.SUCCESS
                && !importConfiguration.isCancelled() && !isCanceled)
            {
              boolean found = false;
              for(byte[] includeBranch : includeBranches)
@@ -1379,28 +1383,29 @@
                  break;
                }
              }
              if(!found) {
              if (!found)
              {
                EntryID id = new EntryID(data);
                Entry entry =
                        entryContainer.getID2Entry().get(null,
                                id, LockMode.DEFAULT);
                    entryContainer.getID2Entry()
                        .get(null, id, LockMode.DEFAULT);
                processEntry(entry, rootContainer.getNextEntryID(),suffix);
                migratedCount++;
                status = cursor.getNext(key, data, lockMode);
              }  else {
              }
              else
              {
                // This is the base entry for a branch that will be included
                // in the import so we don't want to copy the branch to the
                //  new entry container.
                /**
                 * Advance the cursor to next entry at the same level in the
                 * DIT
                 * skipping all the entries in this branch.
                 * Set the next starting value to a value of equal length but
                 * slightly greater than the previous DN. Since keys are
                 * compared in reverse order we must set the first byte
                 * (the comma).
                 * No possibility of overflow here.
                 * Advance the cursor to next entry at the same level in the DIT
                 * skipping all the entries in this branch. Set the next
                 * starting value to a value of equal length but slightly
                 * greater than the previous DN. Since keys are compared in
                 * reverse order we must set the first byte (the comma). No
                 * possibility of overflow here.
                 */
                byte[] begin = Arrays.copyOf(key.getData(), key.getSize()+1);
                begin[begin.length-1] = 0x01;
@@ -1413,7 +1418,8 @@
          catch(Exception e)
          {
            message =
              ERR_JEB_IMPORT_LDIF_MIGRATE_EXISTING_TASK_ERR.get(e.getMessage());
                ERR_JEB_IMPORT_LDIF_MIGRATE_EXISTING_TASK_ERR.get(e
                    .getMessage());
            logError(message);
            isCanceled =true;
            throw e;
@@ -1439,7 +1445,6 @@
    private Entry oldEntry;
    private EntryID entryID;
    /**
     * {@inheritDoc}
     */
@@ -1479,10 +1484,8 @@
      return null;
    }
    void processEntry(Entry entry, Suffix suffix)
            throws DatabaseException, DirectoryException,
            JebException, InterruptedException
    void processEntry(Entry entry, Suffix suffix) throws DatabaseException,
        DirectoryException, JebException, InterruptedException
    {
      DN entryDN = entry.getDN();
@@ -1523,79 +1526,21 @@
      importCount.getAndIncrement();
    }
    void
    processAllIndexes(Suffix suffix, Entry entry, EntryID entryID) throws
            DatabaseException, DirectoryException, JebException,
    void processAllIndexes(Suffix suffix, Entry entry, EntryID entryID)
        throws DatabaseException, DirectoryException, JebException,
            InterruptedException
    {
      for(Map.Entry<AttributeType, AttributeIndex> mapEntry :
              suffix.getAttrIndexMap().entrySet()) {
      for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix
          .getAttrIndexMap().entrySet())
      {
        AttributeType attributeType = mapEntry.getKey();
          AttributeIndex attributeIndex = mapEntry.getValue();
          Index index;
          if((index=attributeIndex.getEqualityIndex()) != null) {
            processAttribute(index, entry, entryID,
            new IndexKey(attributeType, ImportIndexType.EQUALITY,
                         index.getIndexEntryLimit()));
          }
          if((index=attributeIndex.getPresenceIndex()) != null) {
            processAttribute(index, entry, entryID,
                      new IndexKey(attributeType, ImportIndexType.PRESENCE,
                                   index.getIndexEntryLimit()));
          }
          if((index=attributeIndex.getSubstringIndex()) != null) {
            processAttribute(index, entry, entryID,
                new IndexKey(attributeType, ImportIndexType.SUBSTRING,
                             index.getIndexEntryLimit()));
          }
          if((index=attributeIndex.getOrderingIndex()) != null) {
            processAttribute(index, entry, entryID,
                      new IndexKey(attributeType, ImportIndexType.ORDERING,
                                  index.getIndexEntryLimit()));
          }
          if((index=attributeIndex.getApproximateIndex()) != null) {
            processAttribute(index, entry, entryID,
                      new IndexKey(attributeType, ImportIndexType.APPROXIMATE,
                                   index.getIndexEntryLimit()));
          }
          for(VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes()) {
            Transaction transaction = null;
            vlvIdx.addEntry(transaction, entryID, entry);
          }
          Map<String,Collection<Index>> extensibleMap =
                  attributeIndex.getExtensibleIndexes();
          if(!extensibleMap.isEmpty()) {
            Collection<Index> subIndexes =
                    attributeIndex.getExtensibleIndexes().get(
                            EXTENSIBLE_INDEXER_ID_SUBSTRING);
            if(subIndexes != null) {
              for(Index subIndex: subIndexes) {
                processAttribute(subIndex, entry, entryID,
                     new IndexKey(attributeType, ImportIndexType.EX_SUBSTRING,
                                  subIndex.getIndexEntryLimit()));
        fillIndexKey(suffix, mapEntry, entry, attributeType, entryID);
              }
            }
            Collection<Index> sharedIndexes =
                    attributeIndex.getExtensibleIndexes().get(
                            EXTENSIBLE_INDEXER_ID_SHARED);
            if(sharedIndexes !=null) {
              for(Index sharedIndex:sharedIndexes) {
                processAttribute(sharedIndex, entry, entryID,
                       new IndexKey(attributeType, ImportIndexType.EX_SHARED,
                                    sharedIndex.getIndexEntryLimit()));
              }
            }
          }
      }
    }
    @Override
    void processAttribute(Index index, Entry entry, EntryID entryID,
                   IndexKey indexKey) throws DatabaseException,
            InterruptedException
        IndexKey indexKey) throws DatabaseException, InterruptedException
    {
      if(oldEntry != null)
      {
@@ -1615,7 +1560,6 @@
    }
  }
  /**
   * This task performs phase reading and processing of the entries read from
   * the LDIF file(s). This task is used if the append flag wasn't specified.
@@ -1629,7 +1573,6 @@
    private DatabaseEntry keyEntry = new DatabaseEntry(),
                          valEntry = new DatabaseEntry();
    /**
     * {@inheritDoc}
     */
@@ -1668,10 +1611,9 @@
      return null;
    }
    void processEntry(Entry entry, EntryID entryID, Suffix suffix)
            throws DatabaseException, DirectoryException,
            JebException, InterruptedException
        throws DatabaseException, DirectoryException, JebException,
        InterruptedException
    {
      DN entryDN = entry.getDN();
@@ -1697,8 +1639,10 @@
    {
      //Perform parent checking.
      DN parentDN = suffix.getEntryContainer().getParentWithinBase(entryDN);
      if (parentDN != null) {
        if (!suffix.isParentProcessed(parentDN, tmpEnv, clearedBackend)) {
      if (parentDN != null)
      {
        if (!suffix.isParentProcessed(parentDN, tmpEnv, clearedBackend))
        {
          Message message =
                  ERR_JEB_IMPORT_PARENT_NOT_FOUND.get(parentDN.toString());
          reader.rejectEntry(entry, message);
@@ -1727,80 +1671,91 @@
      return true;
    }
    void
    processIndexes(Suffix suffix, Entry entry, EntryID entryID) throws
            DatabaseException, DirectoryException, JebException,
    void processIndexes(Suffix suffix, Entry entry, EntryID entryID)
        throws DatabaseException, DirectoryException, JebException,
            InterruptedException
    {
      for(Map.Entry<AttributeType, AttributeIndex> mapEntry :
              suffix.getAttrIndexMap().entrySet()) {
      for (Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix
          .getAttrIndexMap().entrySet())
      {
        AttributeType attributeType = mapEntry.getKey();
        if(entry.hasAttribute(attributeType)) {
        if (entry.hasAttribute(attributeType))
        {
          fillIndexKey(suffix, mapEntry, entry, attributeType, entryID);
        }
      }
    }
    void fillIndexKey(Suffix suffix,
        Map.Entry<AttributeType, AttributeIndex> mapEntry, Entry entry,
        AttributeType attributeType, EntryID entryID) throws DatabaseException,
        InterruptedException, DirectoryException, JebException
    {
          AttributeIndex attributeIndex = mapEntry.getValue();
          Index index;
          if((index=attributeIndex.getEqualityIndex()) != null) {
            processAttribute(index, entry, entryID,
                      new IndexKey(attributeType, ImportIndexType.EQUALITY,
                                   index.getIndexEntryLimit()));
      if ((index = attributeIndex.getEqualityIndex()) != null)
      {
        processAttribute(index, entry, entryID, new IndexKey(attributeType,
            ImportIndexType.EQUALITY, index.getIndexEntryLimit()));
          }
          if((index=attributeIndex.getPresenceIndex()) != null) {
            processAttribute(index, entry, entryID,
                      new IndexKey(attributeType, ImportIndexType.PRESENCE,
                                   index.getIndexEntryLimit()));
      if ((index = attributeIndex.getPresenceIndex()) != null)
      {
        processAttribute(index, entry, entryID, new IndexKey(attributeType,
            ImportIndexType.PRESENCE, index.getIndexEntryLimit()));
          }
          if((index=attributeIndex.getSubstringIndex()) != null) {
            processAttribute(index, entry, entryID,
                new IndexKey(attributeType, ImportIndexType.SUBSTRING,
                             index.getIndexEntryLimit()));
      if ((index = attributeIndex.getSubstringIndex()) != null)
      {
        processAttribute(index, entry, entryID, new IndexKey(attributeType,
            ImportIndexType.SUBSTRING, index.getIndexEntryLimit()));
          }
          if((index=attributeIndex.getOrderingIndex()) != null) {
            processAttribute(index, entry, entryID,
                      new IndexKey(attributeType, ImportIndexType.ORDERING,
                                   index.getIndexEntryLimit()));
      if ((index = attributeIndex.getOrderingIndex()) != null)
      {
        processAttribute(index, entry, entryID, new IndexKey(attributeType,
            ImportIndexType.ORDERING, index.getIndexEntryLimit()));
          }
          if((index=attributeIndex.getApproximateIndex()) != null) {
            processAttribute(index, entry, entryID,
                      new IndexKey(attributeType, ImportIndexType.APPROXIMATE,
                                   index.getIndexEntryLimit()));
      if ((index = attributeIndex.getApproximateIndex()) != null)
      {
        processAttribute(index, entry, entryID, new IndexKey(attributeType,
            ImportIndexType.APPROXIMATE, index.getIndexEntryLimit()));
          }
          for(VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes()) {
      for (VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes())
      {
            Transaction transaction = null;
            vlvIdx.addEntry(transaction, entryID, entry);
          }
          Map<String,Collection<Index>> extensibleMap =
                  attributeIndex.getExtensibleIndexes();
          if(!extensibleMap.isEmpty()) {
      if (!extensibleMap.isEmpty())
      {
            Collection<Index> subIndexes =
                    attributeIndex.getExtensibleIndexes().get(
                            EXTENSIBLE_INDEXER_ID_SUBSTRING);
            if(subIndexes != null) {
              for(Index subIndex: subIndexes) {
                processAttribute(subIndex, entry, entryID,
                     new IndexKey(attributeType, ImportIndexType.EX_SUBSTRING,
                                  subIndex.getIndexEntryLimit()));
        if (subIndexes != null)
        {
          for (Index subIndex : subIndexes)
          {
            processAttribute(subIndex, entry, entryID, new IndexKey(
                attributeType, ImportIndexType.EX_SUBSTRING, subIndex
                    .getIndexEntryLimit()));
              }
            }
            Collection<Index> sharedIndexes =
                    attributeIndex.getExtensibleIndexes().get(
                            EXTENSIBLE_INDEXER_ID_SHARED);
            if(sharedIndexes !=null) {
              for(Index sharedIndex:sharedIndexes) {
                processAttribute(sharedIndex, entry, entryID,
                        new IndexKey(attributeType, ImportIndexType.EX_SHARED,
                                     sharedIndex.getIndexEntryLimit()));
        if (sharedIndexes != null)
        {
          for (Index sharedIndex : sharedIndexes)
          {
            processAttribute(sharedIndex, entry, entryID, new IndexKey(
                attributeType, ImportIndexType.EX_SHARED, sharedIndex
                    .getIndexEntryLimit()));
              }
            }
          }
        }
      }
    }
   void processAttribute(Index index, Entry entry, EntryID entryID,
                           IndexKey indexKey) throws DatabaseException,
            InterruptedException
        IndexKey indexKey) throws DatabaseException, InterruptedException
    {
      insertKeySet.clear();
      index.indexer.indexEntry(entry, insertKeySet);
@@ -1810,9 +1765,7 @@
      }
    }
    void flushIndexBuffers() throws InterruptedException,
                 ExecutionException
    void flushIndexBuffers() throws InterruptedException, ExecutionException
    {
       Set<Map.Entry<IndexKey, IndexOutputBuffer>> set =
         indexBufferMap.entrySet();
@@ -1833,12 +1786,9 @@
        }
    }
    int
    processKey(DatabaseContainer container, byte[] key, EntryID entryID,
    int processKey(DatabaseContainer container, byte[] key, EntryID entryID,
         IndexOutputBuffer.ComparatorBuffer<byte[]> comparator,
         IndexKey indexKey, boolean insert)
         throws InterruptedException
        IndexKey indexKey, boolean insert) throws InterruptedException
    {
      IndexOutputBuffer indexBuffer = indexBufferMap.get(indexKey);
      if (indexBuffer == null)
@@ -1862,38 +1812,38 @@
      return id;
    }
    IndexOutputBuffer getNewIndexBuffer() throws InterruptedException
    {
      IndexOutputBuffer indexBuffer = freeBufferQueue.take();
        if(indexBuffer == null)
        {
         Message message = Message.raw(Category.JEB, Severity.SEVERE_ERROR,
        Message message =
            Message.raw(Category.JEB, Severity.SEVERE_ERROR,
                                       "Index buffer processing error.");
           throw new InterruptedException(message.toString());
        }
        if(indexBuffer.isPoison())
        {
          Message message = Message.raw(Category.JEB, Severity.SEVERE_ERROR,
        Message message =
            Message.raw(Category.JEB, Severity.SEVERE_ERROR,
                  "Cancel processing received.");
          throw new InterruptedException(message.toString());
        }
      return indexBuffer;
    }
    void processDN2ID(Suffix suffix, DN dn, EntryID entryID)
            throws InterruptedException
    {
      DN2ID dn2id = suffix.getDN2ID();
      byte[] dnBytes =
          JebFormat.dnToDNKey(dn, suffix.getBaseDN().getNumComponents());
      int id = processKey(dn2id, dnBytes, entryID, indexComparator,
                 new IndexKey(dnType, ImportIndexType.DN, 1), true);
      int id =
          processKey(dn2id, dnBytes, entryID, indexComparator, new IndexKey(
              dnType, ImportIndexType.DN, 1), true);
      idECMap.putIfAbsent(id, suffix.getEntryContainer());
    }
    void processDN2URI(Suffix suffix, Entry oldEntry, Entry newEntry)
            throws DatabaseException
    {
@@ -1909,11 +1859,10 @@
    }
  }
  /**
   * This task reads sorted records from the temporary index scratch files,
   * processes the records and writes the results to the index database. The
   * DN index is treated differently then non-DN indexes.
   * processes the records and writes the results to the index database. The DN
   * index is treated differently then non-DN indexes.
   */
  private final class IndexDBWriteTask implements Callable<Void>
  {
@@ -1937,7 +1886,6 @@
    private int ownedPermits;
    private volatile boolean isRunning = false;
    /**
     * Creates a new index DB writer.
     *
@@ -1963,8 +1911,6 @@
      this.dbValue = new DatabaseEntry();
    }
    /**
     * Initializes this task.
     *
@@ -1974,8 +1920,9 @@
    public void beginWriteTask() throws IOException
    {
      bufferFile = new RandomAccessFile(indexMgr.getBufferFile(), "r");
      bufferIndexFile = new DataInputStream(new BufferedInputStream(
          new FileInputStream(indexMgr.getBufferIndexFile())));
      bufferIndexFile =
          new DataInputStream(new BufferedInputStream(new FileInputStream(
              indexMgr.getBufferIndexFile())));
      remainingBuffers = indexMgr.getNumberOfBuffers();
      totalBatches = (remainingBuffers / maxPermits) + 1;
@@ -1983,16 +1930,15 @@
      nextBufferID = 0;
      ownedPermits = 0;
      Message message = NOTE_JEB_IMPORT_LDIF_INDEX_STARTED.get(
          indexMgr.getBufferFileName(), remainingBuffers, totalBatches);
      Message message =
          NOTE_JEB_IMPORT_LDIF_INDEX_STARTED.get(indexMgr.getBufferFileName(),
              remainingBuffers, totalBatches);
      logError(message);
      indexMgr.setIndexDBWriteTask(this);
      isRunning = true;
    }
    /**
     * Returns the next batch of buffers to be processed, blocking until enough
     * buffer permits are available.
@@ -2033,17 +1979,15 @@
      {
        final long bufferBegin = bufferIndexFile.readLong();
        final long bufferEnd = bufferIndexFile.readLong();
        final IndexInputBuffer b = new IndexInputBuffer(indexMgr,
            bufferFile.getChannel(), bufferBegin, bufferEnd, nextBufferID++,
            cacheSize);
        final IndexInputBuffer b =
            new IndexInputBuffer(indexMgr, bufferFile.getChannel(),
                bufferBegin, bufferEnd, nextBufferID++, cacheSize);
        buffers.add(b);
      }
      return buffers;
    }
    /**
     * Finishes this task.
     */
@@ -2068,8 +2012,8 @@
          }
          if(!isCanceled)
          {
            Message msg = NOTE_JEB_IMPORT_LDIF_DN_CLOSE
                .get(indexMgr.getDNCount());
            Message msg =
                NOTE_JEB_IMPORT_LDIF_DN_CLOSE.get(indexMgr.getDNCount());
            logError(msg);
          }
        }
@@ -2081,7 +2025,8 @@
          }
          if(!isCanceled)
          {
            Message message = NOTE_JEB_IMPORT_LDIF_INDEX_CLOSE.get(indexMgr
            Message message =
                NOTE_JEB_IMPORT_LDIF_INDEX_CLOSE.get(indexMgr
                .getBufferFileName());
            logError(message);
          }
@@ -2118,8 +2063,6 @@
      }
    }
    /**
     * Print out progress stats.
     *
@@ -2135,15 +2078,16 @@
        final int currentBatch = batchNumber.get();
        final long bytesReadInterval = tmpBytesRead - lastBytesRead;
        final int bytesReadPercent = Math.round((100f * tmpBytesRead)
            / bufferFileSize);
        final int bytesReadPercent =
            Math.round((100f * tmpBytesRead) / bufferFileSize);
        // Kilo and milli approximately cancel out.
        final long kiloBytesRate = bytesReadInterval / deltaTime;
        final long kiloBytesRemaining = (bufferFileSize - tmpBytesRead) / 1024;
        Message message = NOTE_JEB_IMPORT_LDIF_PHASE_TWO_REPORT.get(
            indexMgr.getBufferFileName(), bytesReadPercent, kiloBytesRemaining,
        Message message =
            NOTE_JEB_IMPORT_LDIF_PHASE_TWO_REPORT.get(indexMgr
                .getBufferFileName(), bytesReadPercent, kiloBytesRemaining,
            kiloBytesRate, currentBatch, totalBatches);
        logError(message);
@@ -2151,8 +2095,6 @@
      }
    }
    /**
     * {@inheritDoc}
     */
@@ -2266,8 +2208,9 @@
      }
      catch (Exception e)
      {
        Message message = ERR_JEB_IMPORT_LDIF_INDEX_WRITE_DB_ERR.get(
            indexMgr.getBufferFileName(), e.getMessage());
        Message message =
            ERR_JEB_IMPORT_LDIF_INDEX_WRITE_DB_ERR.get(indexMgr
                .getBufferFileName(), e.getMessage());
        logError(message);
        throw e;
      }
@@ -2278,7 +2221,6 @@
      return null;
    }
    private void addToDB(ImportIDSet insertSet, ImportIDSet deleteSet,
                         int indexID)
    {
@@ -2287,8 +2229,8 @@
        Index index;
        if((deleteSet.size() > 0) || (!deleteSet.isDefined()))
        {
          dbKey.setData(deleteSet.getKey().array(), 0,
                        deleteSet.getKey().limit());
          dbKey.setData(deleteSet.getKey().array(), 0, deleteSet.getKey()
              .limit());
          index =  (Index)idContainerMap.get(indexID);
          index.delete(dbKey, deleteSet, dbValue);
          if(!indexMap.containsKey(indexID))
@@ -2298,8 +2240,8 @@
        }
        if((insertSet.size() > 0) || (!insertSet.isDefined()))
        {
          dbKey.setData(insertSet.getKey().array(), 0,
                        insertSet.getKey().limit());
          dbKey.setData(insertSet.getKey().array(), 0, insertSet.getKey()
              .limit());
          index =  (Index)idContainerMap.get(indexID);
          index.insert(dbKey, insertSet, dbValue);
          if(!indexMap.containsKey(indexID))
@@ -2314,7 +2256,6 @@
      }
    }
    private void addDN2ID(ImportIDSet record, Integer indexID)
    {
      DNState dnState;
@@ -2334,15 +2275,11 @@
      dnState.writeToDB();
    }
    private void addBytesRead(int bytesRead)
    {
      this.bytesRead.addAndGet(bytesRead);
    }
    /**
     * This class is used to by a index DB merge thread performing DN processing
     * to keep track of the state of individual DN2ID index processing.
@@ -2361,7 +2298,6 @@
      private final int childLimit, subTreeLimit;
      private final boolean childDoCount, subTreeDoCount;
      DNState(EntryContainer entryContainer)
      {
        this.entryContainer = entryContainer;
@@ -2381,7 +2317,6 @@
        lastDN = ByteBuffer.allocate(BYTE_BUFFER_CAPACITY);
      }
      private ByteBuffer getParent(ByteBuffer buffer)
      {
        int parentIndex =
@@ -2398,12 +2333,11 @@
      private ByteBuffer deepCopy(ByteBuffer srcBuffer, ByteBuffer destBuffer)
      {
        if(destBuffer == null ||
           destBuffer.clear().remaining() < srcBuffer.limit())
        if (destBuffer == null
            || destBuffer.clear().remaining() < srcBuffer.limit())
        {
          byte[] bytes = new byte[srcBuffer.limit()];
          System.arraycopy(srcBuffer.array(), 0, bytes, 0,
                           srcBuffer.limit());
          System.arraycopy(srcBuffer.array(), 0, bytes, 0, srcBuffer.limit());
          return ByteBuffer.wrap(bytes);
        }
        else
@@ -2428,8 +2362,8 @@
        //Bypass the cache for append data, lookup the parent in DN2ID and
        //return.
        if(importConfiguration != null &&
           importConfiguration.appendToExistingData())
        if (importConfiguration != null
            && importConfiguration.appendToExistingData())
        {
          //If null is returned than this is a suffix DN.
          if(parentDN != null)
@@ -2482,7 +2416,8 @@
            {
              EntryID newParentID = parentIDMap.get(parentDN);
              ByteBuffer key = parentIDMap.lastKey();
              while(!parentDN.equals(key)) {
              while (!parentDN.equals(key))
              {
                parentIDMap.remove(key);
                key = parentIDMap.lastKey();
              }
@@ -2503,7 +2438,6 @@
        return true;
      }
      private void id2child(EntryID childID)
      {
        ImportIDSet idSet;
@@ -2523,21 +2457,20 @@
        }
      }
      private EntryID getParentID(ByteBuffer dn) throws DatabaseException
      {
        EntryID nodeID;
        //Bypass the cache for append data, lookup the parent DN in the DN2ID
        //db.
        if (importConfiguration != null &&
            importConfiguration.appendToExistingData())
        if (importConfiguration != null
            && importConfiguration.appendToExistingData())
        {
            DatabaseEntry key = new DatabaseEntry(dn.array(), 0, dn.limit());
            DatabaseEntry value = new DatabaseEntry();
            OperationStatus status;
            status =
                entryContainer.getDN2ID().read(null, key, value,
                                               LockMode.DEFAULT);
              entryContainer.getDN2ID()
                  .read(null, key, value, LockMode.DEFAULT);
            if(status == OperationStatus.SUCCESS)
            {
              nodeID = new EntryID(value);
@@ -2554,7 +2487,6 @@
        return nodeID;
      }
      private void id2SubTree(EntryID childID)
      {
        ImportIDSet idSet;
@@ -2570,8 +2502,8 @@
        idSet.addEntryID(childID);
        // TODO:
        //  Instead of doing this, we can just walk to parent cache if available
        for (ByteBuffer dn = getParent(parentDN); dn != null;
             dn = getParent(dn))
        for (ByteBuffer dn = getParent(parentDN); dn != null; dn =
            getParent(dn))
        {
          EntryID nodeID = getParentID(dn);
          if(nodeID == null)
@@ -2597,7 +2529,6 @@
        }
      }
      public void writeToDB()
      {
        entryContainer.getDN2ID().put(null, dnKey, dnValue);
@@ -2609,7 +2540,6 @@
        }
      }
      private void flushMapToDB(Map<byte[], ImportIDSet> map, Index index,
                                boolean clearMap)
      {
@@ -2627,7 +2557,6 @@
        }
      }
      public void flush()
      {
        flushMapToDB(id2childTree, entryContainer.getID2Children(), false);
@@ -2636,10 +2565,9 @@
    }
  }
  /**
   * This task writes the temporary scratch index files using the sorted
   * buffers read from a blocking queue private to each index.
   * This task writes the temporary scratch index files using the sorted buffers
   * read from a blocking queue private to each index.
   */
  private final class ScratchFileWriterTask implements Callable<Void>
  {
@@ -2658,22 +2586,20 @@
    private final SortedSet<IndexOutputBuffer> indexSortedSet;
    private boolean poisonSeen = false;
    public ScratchFileWriterTask(BlockingQueue<IndexOutputBuffer> queue,
                             IndexManager indexMgr) throws FileNotFoundException
    {
      this.queue = queue;
      this.indexMgr = indexMgr;
      this.bufferStream = new DataOutputStream(new BufferedOutputStream(
          new FileOutputStream(indexMgr.getBufferFile()),
          READER_WRITER_BUFFER_SIZE));
      this.bufferIndexStream = new DataOutputStream(new BufferedOutputStream(
          new FileOutputStream(indexMgr.getBufferIndexFile()),
          READER_WRITER_BUFFER_SIZE));
      this.bufferStream =
          new DataOutputStream(new BufferedOutputStream(new FileOutputStream(
              indexMgr.getBufferFile()), READER_WRITER_BUFFER_SIZE));
      this.bufferIndexStream =
          new DataOutputStream(new BufferedOutputStream(new FileOutputStream(
              indexMgr.getBufferIndexFile()), READER_WRITER_BUFFER_SIZE));
      this.indexSortedSet = new TreeSet<IndexOutputBuffer>();
    }
    /**
     * {@inheritDoc}
     */
@@ -2682,7 +2608,8 @@
    {
      long offset = 0;
      List<IndexOutputBuffer> l = new LinkedList<IndexOutputBuffer>();
      try {
      try
      {
        while(true)
        {
          final IndexOutputBuffer indexBuffer = queue.take();
@@ -2733,7 +2660,8 @@
      }
      catch (IOException e)
      {
        Message message = ERR_JEB_IMPORT_LDIF_INDEX_FILEWRITER_ERR.get(indexMgr
        Message message =
            ERR_JEB_IMPORT_LDIF_INDEX_FILEWRITER_ERR.get(indexMgr
            .getBufferFile().getAbsolutePath(), e.getMessage());
        logError(message);
        isCanceled = true;
@@ -2748,15 +2676,16 @@
      return null;
    }
    private long writeIndexBuffer(IndexOutputBuffer indexBuffer)
      throws IOException
    {
      int numberKeys = indexBuffer.getNumberKeys();
      indexBuffer.setPosition(-1);
      long bufferLen = 0;
      insertByteStream.reset(); insertKeyCount = 0;
      deleteByteStream.reset(); deleteKeyCount = 0;
      insertByteStream.reset();
      insertKeyCount = 0;
      deleteByteStream.reset();
      deleteKeyCount = 0;
      for(int i = 0; i < numberKeys; i++)
      {
        if(indexBuffer.getPosition() == -1)
@@ -2778,8 +2707,10 @@
        {
          bufferLen += writeRecord(indexBuffer);
          indexBuffer.setPosition(i);
          insertByteStream.reset();insertKeyCount = 0;
          deleteByteStream.reset();deleteKeyCount = 0;
          insertByteStream.reset();
          insertKeyCount = 0;
          deleteByteStream.reset();
          deleteKeyCount = 0;
        }
        if(indexBuffer.isInsert(i))
        {
@@ -2801,14 +2732,15 @@
      return bufferLen;
    }
    private long writeIndexBuffers(List<IndexOutputBuffer> buffers)
            throws IOException
    {
      long id = 0;
      long bufferLen = 0;
      insertByteStream.reset(); insertKeyCount = 0;
      deleteByteStream.reset(); deleteKeyCount = 0;
      insertByteStream.reset();
      insertKeyCount = 0;
      deleteByteStream.reset();
      deleteKeyCount = 0;
      for(IndexOutputBuffer b : buffers)
      {
        if(b.isPoison())
@@ -2894,7 +2826,6 @@
      return bufferLen;
    }
    private int writeByteStreams() throws IOException
    {
      if(insertKeyCount > indexMgr.getLimit())
@@ -2921,7 +2852,6 @@
      return insertSize + deleteSize;
    }
    private int writeHeader(int indexID, int keySize) throws IOException
    {
      bufferStream.writeInt(indexID);
@@ -2931,35 +2861,31 @@
      return packedSize;
    }
    private int writeRecord(IndexOutputBuffer b) throws IOException
    {
      int keySize = b.getKeySize();
      int packedSize = writeHeader(b.getIndexID(), keySize);
      b.writeKey(bufferStream);
      packedSize += writeByteStreams();
      return (packedSize + keySize + insertByteStream.size() +
              deleteByteStream.size() + 4);
      return (packedSize + keySize + insertByteStream.size()
          + deleteByteStream.size() + 4);
    }
    private int writeRecord(byte[] k, int indexID) throws IOException
    {
      int packedSize = writeHeader(indexID, k.length);
      bufferStream.write(k);
      packedSize += writeByteStreams();
      return (packedSize + k.length + insertByteStream.size() +
              deleteByteStream.size() + 4);
      return (packedSize + k.length + insertByteStream.size()
          + deleteByteStream.size() + 4);
    }
  }
  /**
   * This task main function is to sort the index buffers given to it from
   * the import tasks reading the LDIF file. It will also create a index
   * file writer task and corresponding queue if needed. The sorted index
   * buffers are put on the index file writer queues for writing to a temporary
   * file.
   * This task main function is to sort the index buffers given to it from the
   * import tasks reading the LDIF file. It will also create a index file writer
   * task and corresponding queue if needed. The sorted index buffers are put on
   * the index file writer queues for writing to a temporary file.
   */
  private final class SortTask implements Callable<Void>
  {
@@ -2986,15 +2912,15 @@
      indexBuffer.sort();
      if (indexKeyQueMap.containsKey(indexBuffer.getIndexKey()))
      {
        BlockingQueue<IndexOutputBuffer> q = indexKeyQueMap.get(indexBuffer
            .getIndexKey());
        BlockingQueue<IndexOutputBuffer> q =
            indexKeyQueMap.get(indexBuffer.getIndexKey());
        q.add(indexBuffer);
      }
      else
      {
        createIndexWriterTask(indexBuffer.getIndexKey());
        BlockingQueue<IndexOutputBuffer> q = indexKeyQueMap.get(indexBuffer
            .getIndexKey());
        BlockingQueue<IndexOutputBuffer> q =
            indexKeyQueMap.get(indexBuffer.getIndexKey());
        q.add(indexBuffer);
      }
      return null;
@@ -3014,7 +2940,8 @@
        {
          isDN = true;
        }
        IndexManager indexMgr = new IndexManager(indexKey.getName(), isDN,
        IndexManager indexMgr =
            new IndexManager(indexKey.getName(), isDN,
                                                 indexKey.getEntryLimit());
        if(isDN)
        {
@@ -3029,22 +2956,18 @@
        ScratchFileWriterTask indexWriter =
                new ScratchFileWriterTask(newQue, indexMgr);
        scratchFileWriterList.add(indexWriter);
        scratchFileWriterFutures.add(
                              scratchFileWriterService.submit(indexWriter));
        scratchFileWriterFutures.add(scratchFileWriterService
            .submit(indexWriter));
        indexKeyQueMap.put(indexKey, newQue);
      }
    }
  }
  /**
   * The index manager class has several functions:
   *
   *   1. It used to carry information about index processing created in phase
   *      one to phase two.
   *
   *   2. It collects statistics about phase two processing for each index.
   *
   *   3. It manages opening and closing the scratch index files.
   * The index manager class has several functions: 1. It used to carry
   * information about index processing created in phase one to phase two. 2. It
   * collects statistics about phase two processing for each index. 3. It
   * manages opening and closing the scratch index files.
   */
  final class IndexManager implements Comparable<IndexManager>
  {
@@ -3078,41 +3001,32 @@
      }
    }
    private void setIndexDBWriteTask(IndexDBWriteTask writer)
    {
      this.writer = writer;
    }
    private File getBufferFile()
    {
      return bufferFile;
    }
    private long getBufferFileSize()
    {
      return bufferFileSize;
    }
    private File getBufferIndexFile()
    {
      return bufferIndexFile;
    }
    private void setBufferInfo(int numberOfBuffers, long bufferFileSize)
    {
      this.numberOfBuffers = numberOfBuffers;
      this.bufferFileSize = bufferFileSize;
    }
    /**
     * Updates the bytes read counter.
     *
@@ -3127,25 +3041,21 @@
      }
    }
    private void addTotDNCount(int delta)
    {
      totalDNS += delta;
    }
    private long getDNCount()
    {
      return totalDNS;
    }
    private boolean isDN2ID()
    {
      return isDN;
    }
    private void printStats(long deltaTime)
    {
      if (writer != null)
@@ -3154,7 +3064,6 @@
      }
    }
    /**
     * Returns the file name associated with this index manager.
     *
@@ -3165,13 +3074,11 @@
      return bufferFileName;
    }
    private int getLimit()
    {
      return limit;
    }
    /**
     * {@inheritDoc}
     */
@@ -3181,20 +3088,17 @@
      return numberOfBuffers - mgr.numberOfBuffers;
    }
    private int getNumberOfBuffers()
    {
      return numberOfBuffers;
    }
  }
  /**
   * The rebuild index manager handles all rebuild index related processing.
   */
  private class RebuildIndexManager extends ImportTask
      implements DiskSpaceMonitorHandler
  private class RebuildIndexManager extends ImportTask implements
      DiskSpaceMonitorHandler
  {
   //Rebuild index configuration.
@@ -3232,13 +3136,14 @@
   //The entry container.
   private EntryContainer entryContainer;
    /**
     * Create an instance of the rebuild index manager using the specified
     * parameters.
     *
     * @param rebuildConfig  The rebuild configuration to use.
     * @param cfg The local DB configuration to use.
     * @param rebuildConfig
     *          The rebuild configuration to use.
     * @param cfg
     *          The local DB configuration to use.
     */
    public RebuildIndexManager(RebuildConfig rebuildConfig,
                               LocalDBBackendCfg cfg)
@@ -3247,12 +3152,13 @@
      this.cfg = cfg;
    }
    /**
     * Initialize a rebuild index manager.
     *
     * @throws ConfigException If an configuration error occurred.
     * @throws InitializationException If an initialization error occurred.
     * @throws ConfigException
     *           If an configuration error occurred.
     * @throws InitializationException
     *           If an initialization error occurred.
     */
    public void initialize() throws ConfigException, InitializationException
    {
@@ -3261,17 +3167,18 @@
      suffix = Suffix.createSuffixContext(entryContainer, null, null, null);
      if(suffix == null)
      {
        Message msg = ERR_JEB_REBUILD_SUFFIX_ERROR.get(rebuildConfig.
                getBaseDN().toString());
        Message msg =
            ERR_JEB_REBUILD_SUFFIX_ERROR.get(rebuildConfig.getBaseDN()
                .toString());
        throw new InitializationException(msg);
      }
    }
    /**
     * Print start message.
     *
     * @throws DatabaseException If an database error occurred.
     * @throws DatabaseException
     *           If an database error occurred.
     */
    public void printStartMessage() throws DatabaseException
    {
@@ -3288,7 +3195,8 @@
      totalEntries = suffix.getID2Entry().getRecordCount();
      Message message;
      switch (rebuildConfig.getRebuildMode()) {
      switch (rebuildConfig.getRebuildMode())
      {
      case ALL:
        message = NOTE_JEB_REBUILD_ALL_START.get(totalEntries);
        break;
@@ -3302,11 +3210,11 @@
      logError(message);
    }
    /**
     * Print stop message.
     *
     * @param startTime The time the rebuild started.
     * @param startTime
     *          The time the rebuild started.
     */
    public void printStopMessage(long startTime)
    {
@@ -3323,7 +3231,6 @@
      logError(message);
    }
    /**
     * {@inheritDoc}
     */
@@ -3335,7 +3242,8 @@
          id2entry.openCursor(DiskOrderedCursorConfig.DEFAULT);
      DatabaseEntry key = new DatabaseEntry();
      DatabaseEntry data = new DatabaseEntry();
      try {
      try
      {
        while (cursor.getNext(key, data, null) == OperationStatus.SUCCESS)
        {
          if(isCanceled)
@@ -3343,8 +3251,8 @@
            return null;
          }
          EntryID entryID = new EntryID(key);
          Entry entry = ID2Entry.entryFromDatabase(
                  ByteString.wrap(data.getData()),
          Entry entry =
              ID2Entry.entryFromDatabase(ByteString.wrap(data.getData()),
                  entryContainer.getRootContainer().getCompressedSchema());
          processEntry(entry, entryID);
          entriesProcessed.getAndIncrement();
@@ -3367,8 +3275,6 @@
      return null;
    }
    /**
     * Perform rebuild index processing.
     *
@@ -3377,46 +3283,280 @@
     * @throws InterruptedException
     *           If an interrupted error occurred.
     * @throws ExecutionException
     *           If an Excecution error occurred.
     *           If an Execution error occurred.
     * @throws JebException
     *           If an JEB error occurred.
     */
    public void rebuildIndexes() throws
        DatabaseException, InterruptedException, ExecutionException,
        JebException
    public void rebuildIndexes() throws DatabaseException,
        InterruptedException, ExecutionException, JebException
    {
      switch (rebuildConfig.getRebuildMode())
      {
      case ALL:
        setAllIndexesTrusted(false);
        break;
      case DEGRADED:
        // Nothing to do: degraded indexes are already untrusted.
        break;
      default:
        setRebuildListIndexesTrusted(false);
        break;
      }
      // Sets only the needed indexes.
      setIndexesListsToBeRebuilt();
      if (!rebuildConfig.isClearDegradedState())
      {
        // If not in a 'clear degraded state' operation,
        // need to rebuild the indexes.
        setRebuildListIndexesTrusted(false);
        clearIndexes(true);
      phaseOne();
      if (isCanceled)
      {
        throw new InterruptedException("Rebuild Index canceled.");
      }
      phaseTwo();
      }
      switch (rebuildConfig.getRebuildMode())
      setRebuildListIndexesTrusted(true);
    }
    @SuppressWarnings("fallthrough")
    private void setIndexesListsToBeRebuilt() throws JebException
    {
      // Depends on rebuild mode, (re)building indexes' lists.
      final RebuildMode mode = rebuildConfig.getRebuildMode();
      switch (mode)
      {
      case ALL:
        rebuildIndexMap(false);
        // falls through
      case DEGRADED:
        setAllIndexesTrusted(true);
        if ((mode == RebuildMode.ALL)
            || (!entryContainer.getID2Children().isTrusted() || !entryContainer
                .getID2Subtree().isTrusted()))
        {
          dn2id = entryContainer.getDN2ID();
        }
        if ((mode == RebuildMode.ALL) || entryContainer.getDN2URI() == null)
        {
          dn2uri = entryContainer.getDN2URI();
        }
        if ((mode == RebuildMode.DEGRADED)
            || entryContainer.getAttributeIndexes().isEmpty())
        {
          rebuildIndexMap(true); // only degraded.
        }
        if ((mode == RebuildMode.ALL) || vlvIndexes.isEmpty())
        {
          vlvIndexes.addAll(new LinkedList<VLVIndex>(entryContainer
              .getVLVIndexes()));
        }
        break;
      case USER_DEFINED:
        // false may be required if the user wants to rebuild specific index.
        rebuildIndexMap(false);
        break;
      default:
        setRebuildListIndexesTrusted(true);
        break;
      }
    }
    private void rebuildIndexMap(final boolean onlyDegraded)
    {
      // rebuildList contains the user-selected index(in USER_DEFINED mode).
      final List<String> rebuildList = rebuildConfig.getRebuildList();
      for (final Map.Entry<AttributeType, AttributeIndex> mapEntry : suffix
          .getAttrIndexMap().entrySet())
      {
        final AttributeType attributeType = mapEntry.getKey();
        final AttributeIndex attributeIndex = mapEntry.getValue();
        if (rebuildConfig.getRebuildMode() == RebuildMode.ALL
            || rebuildConfig.getRebuildMode() == RebuildMode.DEGRADED)
        {
          // Get all existing indexes for all && degraded mode.
          rebuildAttributeIndexes(attributeIndex, attributeType, onlyDegraded);
        }
        else
        {
          // Get indexes for user defined index.
          if (!rebuildList.isEmpty())
          {
            for (final String index : rebuildList)
            {
              if (attributeType.getNameOrOID().toLowerCase().equals(
                  index.toLowerCase()))
              {
                rebuildAttributeIndexes(attributeIndex, attributeType,
                    onlyDegraded);
              }
            }
          }
        }
      }
    }
    private void rebuildAttributeIndexes(final AttributeIndex attrIndex,
        final AttributeType attrType, final boolean onlyDegraded)
        throws DatabaseException
    {
      if (attrIndex.getSubstringIndex() != null)
      {
        fillIndexMap(attrType, attrIndex.getSubstringIndex(),
            ImportIndexType.SUBSTRING, onlyDegraded);
      }
      if (attrIndex.getOrderingIndex() != null)
      {
        fillIndexMap(attrType, attrIndex.getOrderingIndex(),
            ImportIndexType.ORDERING, onlyDegraded);
      }
      if (attrIndex.getEqualityIndex() != null)
      {
        fillIndexMap(attrType, attrIndex.getEqualityIndex(),
            ImportIndexType.EQUALITY, onlyDegraded);
      }
      if (attrIndex.getPresenceIndex() != null)
      {
        fillIndexMap(attrType, attrIndex.getPresenceIndex(),
            ImportIndexType.PRESENCE, onlyDegraded);
      }
      if (attrIndex.getApproximateIndex() != null)
      {
        fillIndexMap(attrType, attrIndex.getApproximateIndex(),
            ImportIndexType.APPROXIMATE, onlyDegraded);
      }
      final Map<String, Collection<Index>> extensibleMap =
          attrIndex.getExtensibleIndexes();
      if (!extensibleMap.isEmpty())
      {
        final Collection<Index> subIndexes =
            attrIndex.getExtensibleIndexes().get(
                EXTENSIBLE_INDEXER_ID_SUBSTRING);
        if (subIndexes != null && !subIndexes.isEmpty())
        {
          final List<Index> mutableCopy = new LinkedList<Index>(subIndexes);
          final Iterator<Index> i = mutableCopy.iterator();
          while (i.hasNext())
          {
            final Index subIndex = i.next();
            if (!onlyDegraded || !subIndex.isTrusted())
            {
              if ((rebuildConfig.isClearDegradedState() && subIndex
                  .getRecordCount() == 0)
                  || !rebuildConfig.isClearDegradedState())
              {
                int id = System.identityHashCode(subIndex);
                idContainerMap.putIfAbsent(id, subIndex);
              }
            }
            else
            {
              // This index is not a candidate for rebuilding.
              i.remove();
            }
          }
          if (!mutableCopy.isEmpty())
          {
            extensibleIndexMap.put(new IndexKey(attrType,
                ImportIndexType.EX_SUBSTRING, 0), mutableCopy);
          }
        }
        final Collection<Index> sharedIndexes =
            attrIndex.getExtensibleIndexes().get(EXTENSIBLE_INDEXER_ID_SHARED);
        if (sharedIndexes != null && !sharedIndexes.isEmpty())
        {
          final List<Index> mutableCopy = new LinkedList<Index>(sharedIndexes);
          final Iterator<Index> i = mutableCopy.iterator();
          while (i.hasNext())
          {
            final Index sharedIndex = i.next();
            if (!onlyDegraded || !sharedIndex.isTrusted())
            {
              if ((rebuildConfig.isClearDegradedState() && sharedIndex
                  .getRecordCount() == 0)
                  || !rebuildConfig.isClearDegradedState())
              {
                int id = System.identityHashCode(sharedIndex);
                idContainerMap.putIfAbsent(id, sharedIndex);
              }
            }
            else
            {
              // This index is not a candidate for rebuilding.
              i.remove();
            }
          }
          if (!mutableCopy.isEmpty())
          {
            extensibleIndexMap.put(new IndexKey(attrType,
                ImportIndexType.EX_SHARED, 0), mutableCopy);
          }
        }
      }
    }
    private void fillIndexMap(final AttributeType attrType,
        final Index partialAttrIndex, final ImportIndexType importIndexType,
        final boolean onlyDegraded)
    {
      if ((!onlyDegraded || !partialAttrIndex.isTrusted()))
      {
        if ((rebuildConfig.isClearDegradedState() && partialAttrIndex
            .getRecordCount() == 0)
            || !rebuildConfig.isClearDegradedState())
        {
          final int id = System.identityHashCode(partialAttrIndex);
          idContainerMap.putIfAbsent(id, partialAttrIndex);
          final IndexKey indexKey =
              new IndexKey(attrType, importIndexType, partialAttrIndex
                  .getIndexEntryLimit());
          indexMap.put(indexKey, partialAttrIndex);
        }
      }
    }
    private void clearIndexes(boolean onlyDegraded) throws DatabaseException
    {
      // Clears all the entry's container databases
      // which are containing the indexes.
      if (!onlyDegraded)
      {
        // dn2uri does not have a trusted status.
        entryContainer.clearDatabase(entryContainer.getDN2URI());
      }
      if (!onlyDegraded || !entryContainer.getID2Children().isTrusted()
          || !entryContainer.getID2Subtree().isTrusted())
      {
        entryContainer.clearDatabase(entryContainer.getDN2ID());
        entryContainer.clearDatabase(entryContainer.getID2Children());
        entryContainer.clearDatabase(entryContainer.getID2Subtree());
      }
      if (!indexMap.isEmpty())
      {
        for (final Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet())
        {
          if (!onlyDegraded || !mapEntry.getValue().isTrusted())
          {
            entryContainer.clearDatabase(mapEntry.getValue());
          }
        }
      }
      if (!extensibleIndexMap.isEmpty())
      {
        for (final Collection<Index> subIndexes : extensibleIndexMap.values())
        {
          if (subIndexes != null)
          {
            for (final Index subIndex : subIndexes)
            {
              entryContainer.clearDatabase(subIndex);
            }
          }
        }
      }
      for (final VLVIndex vlvIndex : entryContainer.getVLVIndexes())
      {
        if (!onlyDegraded || !vlvIndex.isTrusted())
        {
          entryContainer.clearDatabase(vlvIndex);
        }
      }
    }
    private void setRebuildListIndexesTrusted(boolean trusted)
        throws JebException
@@ -3431,7 +3571,8 @@
        }
        if(!indexMap.isEmpty())
        {
          for(Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet()) {
          for (Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet())
          {
            Index index = mapEntry.getValue();
            index.setTrusted(null, trusted);
          }
@@ -3447,8 +3588,10 @@
        {
          for(Collection<Index> subIndexes : extensibleIndexMap.values())
          {
            if(subIndexes != null) {
              for(Index subIndex : subIndexes) {
            if (subIndexes != null)
            {
              for (Index subIndex : subIndexes)
              {
                subIndex.setTrusted(null, trusted);
              }
            }
@@ -3463,36 +3606,9 @@
      }
    }
    private void setAllIndexesTrusted(boolean trusted) throws JebException
    private void phaseOne() throws DatabaseException, InterruptedException,
        ExecutionException
    {
      try {
        suffix.setIndexesTrusted(trusted);
      }
      catch (DatabaseException ex)
      {
        Message message =
                NOTE_JEB_IMPORT_LDIF_TRUSTED_FAILED.get(ex.getMessage());
        throw new JebException(message);
      }
    }
    private void phaseOne() throws DatabaseException,
            InterruptedException, ExecutionException {
      switch (rebuildConfig.getRebuildMode())
      {
      case ALL:
        clearAllIndexes(false);
        break;
      case DEGRADED:
        clearAllIndexes(true);
        break;
      default:
        clearRebuildListIndexes();
        break;
      }
      initializeIndexBuffers();
      RebuildFirstPhaseProgressTask progressTask =
              new RebuildFirstPhaseProgressTask();
@@ -3508,15 +3624,18 @@
        tasks.add(this);
      }
      List<Future<Void>> results = rebuildIndexService.invokeAll(tasks);
      for (Future<Void> result : results) {
        if(!result.isDone()) {
      for (Future<Void> result : results)
      {
        if (!result.isDone())
        {
          result.get();
        }
      }
      stopScratchFileWriters();
      for (Future<?> result : scratchFileWriterFutures)
      {
        if(!result.isDone()) {
        if (!result.isDone())
        {
          result.get();
        }
      }
@@ -3538,20 +3657,16 @@
      freeBufferQueue.clear();
    }
    private void phaseTwo()
        throws InterruptedException, ExecutionException
    private void phaseTwo() throws InterruptedException, ExecutionException
    {
      SecondPhaseProgressTask progressTask = new SecondPhaseProgressTask(
          entriesProcessed.get());
      SecondPhaseProgressTask progressTask =
          new SecondPhaseProgressTask(entriesProcessed.get());
      Timer timer2 = new Timer();
      timer2.scheduleAtFixedRate(progressTask, TIMER_INTERVAL, TIMER_INTERVAL);
      processIndexFiles();
      timer2.cancel();
    }
    private int getIndexCount() throws ConfigException, JebException
    {
      switch (rebuildConfig.getRebuildMode())
@@ -3568,7 +3683,6 @@
      }
    }
    private int getRebuildListIndexCount(LocalDBBackendCfg cfg)
            throws JebException, ConfigException
    {
@@ -3595,8 +3709,9 @@
              throw new JebException(msg);
            }
            indexCount++;
          } else if(lowerName.equals("id2subtree") ||
                  lowerName.equals("id2children"))
          }
          else if (lowerName.equals("id2subtree")
              || lowerName.equals("id2children"))
          {
            Message msg = ERR_JEB_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index);
            throw new JebException(msg);
@@ -3639,7 +3754,9 @@
                else if(attrIndexParts[1].equals("approximate"))
                {
                  indexCount++;
                } else {
                }
                else
                {
                  Message msg =
                          ERR_JEB_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index);
                  throw new JebException(msg);
@@ -3652,8 +3769,8 @@
                for (String idx : cfg.listLocalDBIndexes())
                {
                  LocalDBIndexCfg indexCfg = cfg.getLocalDBIndex(idx);
                  if (indexCfg.getIndexType().
                          contains(LocalDBIndexCfgDefn.IndexType.EXTENSIBLE))
                  if (indexCfg.getIndexType().contains(
                      LocalDBIndexCfgDefn.IndexType.EXTENSIBLE))
                  {
                    Set<String> extensibleRules =
                            indexCfg.getIndexExtensibleMatchingRule();
@@ -3671,7 +3788,8 @@
                    break;
                  }
                }
                if(!found) {
                if (!found)
                {
                  Message msg =
                          ERR_JEB_ATTRIBUTE_INDEX_NOT_CONFIGURED.get(index);
                  throw new JebException(msg);
@@ -3688,33 +3806,33 @@
                  continue;
                }
                LocalDBIndexCfg indexCfg = cfg.getLocalDBIndex(idx);
                if(indexCfg.getIndexType().
                        contains(LocalDBIndexCfgDefn.IndexType.EQUALITY))
                if (indexCfg.getIndexType().contains(
                    LocalDBIndexCfgDefn.IndexType.EQUALITY))
                {
                  indexCount++;
                }
                if(indexCfg.getIndexType().
                        contains(LocalDBIndexCfgDefn.IndexType.ORDERING))
                if (indexCfg.getIndexType().contains(
                    LocalDBIndexCfgDefn.IndexType.ORDERING))
                {
                  indexCount++;
                }
                if(indexCfg.getIndexType().
                        contains(LocalDBIndexCfgDefn.IndexType.PRESENCE))
                if (indexCfg.getIndexType().contains(
                    LocalDBIndexCfgDefn.IndexType.PRESENCE))
                {
                  indexCount++;
                }
                if(indexCfg.getIndexType().
                        contains(LocalDBIndexCfgDefn.IndexType.SUBSTRING))
                if (indexCfg.getIndexType().contains(
                    LocalDBIndexCfgDefn.IndexType.SUBSTRING))
                {
                  indexCount++;
                }
                if(indexCfg.getIndexType().
                        contains(LocalDBIndexCfgDefn.IndexType.APPROXIMATE))
                if (indexCfg.getIndexType().contains(
                    LocalDBIndexCfgDefn.IndexType.APPROXIMATE))
                {
                  indexCount++;
                }
                if (indexCfg.getIndexType().
                        contains(LocalDBIndexCfgDefn.IndexType.EXTENSIBLE))
                if (indexCfg.getIndexType().contains(
                    LocalDBIndexCfgDefn.IndexType.EXTENSIBLE))
                {
                  Set<String> extensibleRules =
                          indexCfg.getIndexExtensibleMatchingRule();
@@ -3743,358 +3861,9 @@
      return indexCount;
    }
    private void clearRebuildListIndexes() throws DatabaseException
    {
      List<String> rebuildList = rebuildConfig.getRebuildList();
      if(!rebuildList.isEmpty())
      {
        for (String index : rebuildList)
        {
          String lowerName = index.toLowerCase();
          if (lowerName.equals("dn2id"))
          {
            clearDN2IDIndexes(false);
          }
          else if (lowerName.equals("dn2uri"))
          {
            clearDN2URI(false);
          }
          else if (lowerName.startsWith("vlv."))
          {
            VLVIndex vlvIndex = entryContainer.getVLVIndex(lowerName
                .substring(4));
            clearVLVIndex(vlvIndex, false);
          }
          else
          {
            String[] attrIndexParts = lowerName.split("\\.");
            AttributeType attrType =
                    DirectoryServer.getAttributeType(attrIndexParts[0]);
            AttributeIndex attrIndex =
                                   entryContainer.getAttributeIndex(attrType);
            if(attrIndexParts.length != 1)
            {
              Index partialAttrIndex;
              if(attrIndexParts[1].equals("presence"))
              {
                partialAttrIndex = attrIndex.getPresenceIndex();
                int id = System.identityHashCode(partialAttrIndex);
                idContainerMap.putIfAbsent(id, partialAttrIndex);
                entryContainer.clearDatabase(partialAttrIndex);
                IndexKey indexKey =
                        new IndexKey(attrType, ImportIndexType.PRESENCE,
                                partialAttrIndex.getIndexEntryLimit());
                indexMap.put(indexKey, partialAttrIndex);
              }
              else if(attrIndexParts[1].equals("equality"))
              {
                partialAttrIndex = attrIndex.getEqualityIndex();
                int id = System.identityHashCode(partialAttrIndex);
                idContainerMap.putIfAbsent(id, partialAttrIndex);
                entryContainer.clearDatabase(partialAttrIndex);
                IndexKey indexKey =
                        new IndexKey(attrType, ImportIndexType.EQUALITY,
                                partialAttrIndex.getIndexEntryLimit());
                indexMap.put(indexKey, partialAttrIndex);
              }
              else if(attrIndexParts[1].equals("substring"))
              {
                partialAttrIndex = attrIndex.getSubstringIndex();
                int id = System.identityHashCode(partialAttrIndex);
                idContainerMap.putIfAbsent(id, partialAttrIndex);
                entryContainer.clearDatabase(partialAttrIndex);
                IndexKey indexKey =
                        new IndexKey(attrType, ImportIndexType.SUBSTRING,
                                partialAttrIndex.getIndexEntryLimit());
                indexMap.put(indexKey, partialAttrIndex);
              }
              else if(attrIndexParts[1].equals("ordering"))
              {
                partialAttrIndex = attrIndex.getOrderingIndex();
                int id = System.identityHashCode(partialAttrIndex);
                idContainerMap.putIfAbsent(id, partialAttrIndex);
                entryContainer.clearDatabase(partialAttrIndex);
                IndexKey indexKey =
                        new IndexKey(attrType, ImportIndexType.ORDERING,
                                partialAttrIndex.getIndexEntryLimit());
                indexMap.put(indexKey, partialAttrIndex);
              }
              else if(attrIndexParts[1].equals("approximate"))
              {
                partialAttrIndex = attrIndex.getApproximateIndex();
                int id = System.identityHashCode(partialAttrIndex);
                idContainerMap.putIfAbsent(id, partialAttrIndex);
                entryContainer.clearDatabase(partialAttrIndex);
                IndexKey indexKey =
                        new IndexKey(attrType, ImportIndexType.APPROXIMATE,
                                partialAttrIndex.getIndexEntryLimit());
                indexMap.put(indexKey, partialAttrIndex);
              }
              else
              {
                String dbPart = "shared";
                if(attrIndexParts[2].startsWith("sub"))
                {
                  dbPart = "substring";
                }
                StringBuilder nameBldr = new StringBuilder();
                nameBldr.append(entryContainer.getDatabasePrefix());
                nameBldr.append("_");
                nameBldr.append(attrIndexParts[0]);
                nameBldr.append(".");
                nameBldr.append(attrIndexParts[1]);
                nameBldr.append(".");
                nameBldr.append(dbPart);
                String indexName = nameBldr.toString();
                Map<String,Collection<Index>> extensibleMap =
                        attrIndex.getExtensibleIndexes();
                if(!extensibleMap.isEmpty()) {
                  Collection<Index> subIndexes =
                          attrIndex.getExtensibleIndexes().get(
                                  EXTENSIBLE_INDEXER_ID_SUBSTRING);
                  if(subIndexes != null) {
                    for(Index subIndex : subIndexes) {
                      String name = subIndex.getName();
                      if(name.equalsIgnoreCase(indexName))
                      {
                        entryContainer.clearDatabase(subIndex);
                        int id = System.identityHashCode(subIndex);
                        idContainerMap.putIfAbsent(id, subIndex);
                        Collection<Index> substring = new ArrayList<Index>();
                        substring.add(subIndex);
                        extensibleIndexMap.put(new IndexKey(attrType,
                                ImportIndexType.EX_SUBSTRING, 0),substring);
                        break;
                      }
                    }
                    Collection<Index> sharedIndexes =
                            attrIndex.getExtensibleIndexes().
                                              get(EXTENSIBLE_INDEXER_ID_SHARED);
                    if(sharedIndexes !=null) {
                      for(Index sharedIndex : sharedIndexes) {
                        String name = sharedIndex.getName();
                        if(name.equalsIgnoreCase(indexName))
                        {
                          entryContainer.clearDatabase(sharedIndex);
                          Collection<Index> shared = new ArrayList<Index>();
                          int id = System.identityHashCode(sharedIndex);
                          idContainerMap.putIfAbsent(id, sharedIndex);
                          shared.add(sharedIndex);
                          extensibleIndexMap.put(new IndexKey(attrType,
                                  ImportIndexType.EX_SHARED, 0), shared);
                          break;
                        }
                      }
                    }
                  }
                }
              }
            }
            else
            {
              clearAttributeIndexes(attrIndex, attrType, false);
            }
          }
        }
      }
    }
    private void clearAllIndexes(boolean onlyDegraded) throws DatabaseException
    {
      for(Map.Entry<AttributeType, AttributeIndex> mapEntry :
              suffix.getAttrIndexMap().entrySet()) {
        AttributeType attributeType = mapEntry.getKey();
        AttributeIndex attributeIndex = mapEntry.getValue();
        clearAttributeIndexes(attributeIndex, attributeType, onlyDegraded);
      }
      for(VLVIndex vlvIndex : suffix.getEntryContainer().getVLVIndexes()) {
        clearVLVIndex(vlvIndex, onlyDegraded);
      }
      clearDN2IDIndexes(onlyDegraded);
      if(entryContainer.getDN2URI() != null)
      {
        clearDN2URI(onlyDegraded);
      }
    }
    private void clearVLVIndex(VLVIndex vlvIndex, boolean onlyDegraded)
        throws DatabaseException
    {
      if (!onlyDegraded || !vlvIndex.isTrusted())
      {
        entryContainer.clearDatabase(vlvIndex);
        vlvIndexes.add(vlvIndex);
      }
    }
    private void clearDN2URI(boolean onlyDegraded) throws DatabaseException
    {
      if (!onlyDegraded)
      {
        // dn2uri does not have a trusted status.
        entryContainer.clearDatabase(entryContainer.getDN2URI());
        dn2uri = entryContainer.getDN2URI();
      }
    }
    private void clearDN2IDIndexes(boolean onlyDegraded)
        throws DatabaseException
    {
      if (!onlyDegraded || !entryContainer.getID2Children().isTrusted()
          || !entryContainer.getID2Subtree().isTrusted())
      {
        entryContainer.clearDatabase(entryContainer.getDN2ID());
        entryContainer.clearDatabase(entryContainer.getID2Children());
        entryContainer.clearDatabase(entryContainer.getID2Subtree());
        dn2id = entryContainer.getDN2ID();
      }
    }
    private void clearAttributeIndexes(AttributeIndex attrIndex,
                                      AttributeType attrType,
                                      boolean onlyDegraded)
            throws DatabaseException
    {
      if (attrIndex.getSubstringIndex() != null)
      {
        Index partialAttrIndex = attrIndex.getSubstringIndex();
        if (!onlyDegraded || !partialAttrIndex.isTrusted())
        {
          int id = System.identityHashCode(partialAttrIndex);
          idContainerMap.putIfAbsent(id, partialAttrIndex);
          entryContainer.clearDatabase(partialAttrIndex);
          IndexKey indexKey = new IndexKey(attrType, ImportIndexType.SUBSTRING,
              partialAttrIndex.getIndexEntryLimit());
          indexMap.put(indexKey, partialAttrIndex);
        }
      }
      if (attrIndex.getOrderingIndex() != null)
      {
        Index partialAttrIndex = attrIndex.getOrderingIndex();
        if (!onlyDegraded || !partialAttrIndex.isTrusted())
        {
          int id = System.identityHashCode(partialAttrIndex);
          idContainerMap.putIfAbsent(id, partialAttrIndex);
          entryContainer.clearDatabase(partialAttrIndex);
          IndexKey indexKey = new IndexKey(attrType, ImportIndexType.ORDERING,
              partialAttrIndex.getIndexEntryLimit());
          indexMap.put(indexKey, partialAttrIndex);
        }
      }
      if(attrIndex.getEqualityIndex() != null)
      {
        Index partialAttrIndex = attrIndex.getEqualityIndex();
        if (!onlyDegraded || !partialAttrIndex.isTrusted())
        {
          int id = System.identityHashCode(partialAttrIndex);
          idContainerMap.putIfAbsent(id, partialAttrIndex);
          entryContainer.clearDatabase(partialAttrIndex);
          IndexKey indexKey = new IndexKey(attrType, ImportIndexType.EQUALITY,
              partialAttrIndex.getIndexEntryLimit());
          indexMap.put(indexKey, partialAttrIndex);
        }
      }
      if (attrIndex.getPresenceIndex() != null)
      {
        Index partialAttrIndex = attrIndex.getPresenceIndex();
        if (!onlyDegraded || !partialAttrIndex.isTrusted())
        {
          int id = System.identityHashCode(partialAttrIndex);
          idContainerMap.putIfAbsent(id, partialAttrIndex);
          entryContainer.clearDatabase(partialAttrIndex);
          IndexKey indexKey = new IndexKey(attrType, ImportIndexType.PRESENCE,
              partialAttrIndex.getIndexEntryLimit());
          indexMap.put(indexKey, partialAttrIndex);
        }
      }
      if (attrIndex.getApproximateIndex() != null)
      {
        Index partialAttrIndex = attrIndex.getApproximateIndex();
        if (!onlyDegraded || !partialAttrIndex.isTrusted())
        {
          int id = System.identityHashCode(partialAttrIndex);
          idContainerMap.putIfAbsent(id, partialAttrIndex);
          entryContainer.clearDatabase(partialAttrIndex);
          IndexKey indexKey = new IndexKey(attrType,
              ImportIndexType.APPROXIMATE,
              partialAttrIndex.getIndexEntryLimit());
          indexMap.put(indexKey, partialAttrIndex);
        }
      }
      Map<String, Collection<Index>> extensibleMap = attrIndex
          .getExtensibleIndexes();
      if (!extensibleMap.isEmpty())
      {
        Collection<Index> subIndexes = attrIndex.getExtensibleIndexes().get(
            EXTENSIBLE_INDEXER_ID_SUBSTRING);
        if (subIndexes != null && !subIndexes.isEmpty())
        {
          List<Index> mutableCopy = new LinkedList<Index>(subIndexes);
          Iterator<Index> i = mutableCopy.iterator();
          while (i.hasNext())
          {
            Index subIndex = i.next();
            if (!onlyDegraded || !subIndex.isTrusted())
            {
              entryContainer.clearDatabase(subIndex);
              int id = System.identityHashCode(subIndex);
              idContainerMap.putIfAbsent(id, subIndex);
            }
            else
            {
              // This index is not a candidate for rebuilding.
              i.remove();
            }
          }
          if (!mutableCopy.isEmpty())
          {
            extensibleIndexMap.put(new IndexKey(attrType,
                ImportIndexType.EX_SUBSTRING, 0), mutableCopy);
          }
        }
        Collection<Index> sharedIndexes = attrIndex.getExtensibleIndexes().get(
            EXTENSIBLE_INDEXER_ID_SHARED);
        if (sharedIndexes != null && !sharedIndexes.isEmpty())
        {
          List<Index> mutableCopy = new LinkedList<Index>(sharedIndexes);
          Iterator<Index> i = mutableCopy.iterator();
          while (i.hasNext())
          {
            Index sharedIndex = i.next();
            if (!onlyDegraded || !sharedIndex.isTrusted())
            {
              entryContainer.clearDatabase(sharedIndex);
              int id = System.identityHashCode(sharedIndex);
              idContainerMap.putIfAbsent(id, sharedIndex);
            }
            else
            {
              // This index is not a candidate for rebuilding.
              i.remove();
            }
          }
          if (!mutableCopy.isEmpty())
          {
            extensibleIndexMap.put(new IndexKey(attrType,
                ImportIndexType.EX_SHARED, 0), mutableCopy);
          }
        }
      }
    }
    private
    void processEntry(Entry entry, EntryID entryID) throws DatabaseException,
            DirectoryException, JebException, InterruptedException
    private void processEntry(Entry entry, EntryID entryID)
        throws DatabaseException, DirectoryException, JebException,
        InterruptedException
    {
      if(dn2id != null)
      {
@@ -4109,63 +3878,60 @@
      processVLVIndexes(entry, entryID);
    }
    private void processVLVIndexes(Entry entry, EntryID entryID)
            throws DatabaseException, JebException, DirectoryException
    {
      for(VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes()) {
      for (VLVIndex vlvIdx : suffix.getEntryContainer().getVLVIndexes())
      {
        Transaction transaction = null;
        vlvIdx.addEntry(transaction, entryID, entry);
      }
    }
    private
    void processExtensibleIndexes(Entry entry, EntryID entryID) throws
            InterruptedException
    private void processExtensibleIndexes(Entry entry, EntryID entryID)
        throws InterruptedException
    {
      for(Map.Entry<IndexKey, Collection<Index>> mapEntry :
              this.extensibleIndexMap.entrySet()) {
          this.extensibleIndexMap.entrySet())
      {
        IndexKey key = mapEntry.getKey();
        AttributeType attrType = key.getAttributeType();
        if(entry.hasAttribute(attrType)) {
        if (entry.hasAttribute(attrType))
        {
          Collection<Index> indexes = mapEntry.getValue();
          for(Index index : indexes) {
          for (Index index : indexes)
          {
            processAttribute(index, entry, entryID, key);
          }
        }
      }
    }
    private void
    processIndexes(Entry entry, EntryID entryID) throws
            DatabaseException, InterruptedException
    private void processIndexes(Entry entry, EntryID entryID)
        throws DatabaseException, InterruptedException
    {
      for(Map.Entry<IndexKey, Index> mapEntry :
              indexMap.entrySet()) {
      for (Map.Entry<IndexKey, Index> mapEntry : indexMap.entrySet())
      {
        IndexKey key = mapEntry.getKey();
        AttributeType attrType = key.getAttributeType();
        if(entry.hasAttribute(attrType)) {
        if (entry.hasAttribute(attrType))
        {
          ImportIndexType indexType = key.getIndexType();
          Index index = mapEntry.getValue();
          if(indexType == ImportIndexType.SUBSTRING)
          {
            processAttribute(index, entry, entryID,
                    new IndexKey(attrType, ImportIndexType.SUBSTRING,
                            index.getIndexEntryLimit()));
            processAttribute(index, entry, entryID, new IndexKey(attrType,
                ImportIndexType.SUBSTRING, index.getIndexEntryLimit()));
          }
          else
          {
            processAttribute(index, entry, entryID,
                    new IndexKey(attrType, indexType,
                                 index.getIndexEntryLimit()));
            processAttribute(index, entry, entryID, new IndexKey(attrType,
                indexType, index.getIndexEntryLimit()));
          }
        }
      }
    }
    /**
     * Return the number of entries processed by the rebuild manager.
     *
@@ -4176,7 +3942,6 @@
      return this.entriesProcessed.get();
    }
    /**
     * Return the total number of entries to process by the rebuild manager.
     *
@@ -4188,34 +3953,36 @@
    }
    @Override
    public void diskLowThresholdReached(DiskSpaceMonitor monitor) {
    public void diskLowThresholdReached(DiskSpaceMonitor monitor)
    {
      diskFullThresholdReached(monitor);
    }
    @Override
    public void diskFullThresholdReached(DiskSpaceMonitor monitor) {
    public void diskFullThresholdReached(DiskSpaceMonitor monitor)
    {
      isCanceled = true;
      Message msg = ERR_REBUILD_INDEX_LACK_DISK.get(
          monitor.getDirectory().getPath(), monitor.getFreeSpace(),
          monitor.getLowThreshold());
      Message msg =
          ERR_REBUILD_INDEX_LACK_DISK.get(monitor.getDirectory().getPath(),
              monitor.getFreeSpace(), monitor.getLowThreshold());
      logError(msg);
    }
    @Override
    public void diskSpaceRestored(DiskSpaceMonitor monitor) {
    public void diskSpaceRestored(DiskSpaceMonitor monitor)
    {
      // Do nothing
    }
  }
  /**
   * This class reports progress of rebuild index processing at fixed
   * intervals.
   * This class reports progress of rebuild index processing at fixed intervals.
   */
  private class RebuildFirstPhaseProgressTask extends TimerTask
  {
    /**
     * The number of records that had been processed at the time of the
     * previous progress report.
     * The number of records that had been processed at the time of the previous
     * progress report.
     */
    private long previousProcessed = 0;
@@ -4232,8 +3999,8 @@
    /**
     * Create a new rebuild index progress task.
     *
     * @throws DatabaseException If an error occurred while accessing the JE
     *                           database.
     * @throws DatabaseException
     *           If an error occurred while accessing the JE database.
     */
    public RebuildFirstPhaseProgressTask() throws DatabaseException
    {
@@ -4262,8 +4029,9 @@
      {
        completed = 100f*entriesProcessed / rebuildManager.getTotEntries();
      }
      Message message = NOTE_JEB_REBUILD_PROGRESS_REPORT.get(completed,
                      entriesProcessed, rebuildManager.getTotEntries(), rate);
      Message message =
          NOTE_JEB_REBUILD_PROGRESS_REPORT.get(completed, entriesProcessed,
              rebuildManager.getTotEntries(), rate);
      logError(message);
      try
      {
@@ -4279,8 +4047,9 @@
        {
          cacheMissRate = nCacheMiss/(float)deltaCount;
        }
        message = NOTE_JEB_REBUILD_CACHE_AND_MEMORY_REPORT.get(
                freeMemory, cacheMissRate);
        message =
            NOTE_JEB_REBUILD_CACHE_AND_MEMORY_REPORT.get(freeMemory,
                cacheMissRate);
        logError(message);
        prevEnvStats = envStats;
      }
@@ -4293,16 +4062,15 @@
    }
  }
  /**
   * This class reports progress of first phase of import processing at
   * fixed intervals.
   * This class reports progress of first phase of import processing at fixed
   * intervals.
   */
  private final class FirstPhaseProgressTask extends TimerTask
  {
    /**
     * The number of entries that had been read at the time of the
     * previous progress report.
     * The number of entries that had been read at the time of the previous
     * progress report.
     */
    private long previousCount = 0;
@@ -4316,14 +4084,12 @@
     */
    private EnvironmentStats previousStats;
    // Determines if eviction has been detected.
    private boolean evicting = false;
    // Entry count when eviction was detected.
    private long evictionEntryCount = 0;
    /**
     * Create a new import progress task.
     */
@@ -4332,8 +4098,7 @@
      previousTime = System.currentTimeMillis();
      try
      {
        previousStats =
                rootContainer.getEnvironmentStats(new StatsConfig());
        previousStats = rootContainer.getEnvironmentStats(new StatsConfig());
      }
      catch (DatabaseException e)
      {
@@ -4341,8 +4106,6 @@
      }
    }
    /**
     * The action to be performed by this timer task.
     */
@@ -4362,8 +4125,9 @@
      long entriesIgnored = reader.getEntriesIgnored();
      long entriesRejected = reader.getEntriesRejected();
      float rate = 1000f * deltaCount / deltaTime;
      message = NOTE_JEB_IMPORT_PROGRESS_REPORT.get(entriesRead,
              entriesIgnored, entriesRejected, 0, rate);
      message =
          NOTE_JEB_IMPORT_PROGRESS_REPORT.get(entriesRead, entriesIgnored,
              entriesRejected, 0, rate);
      logError(message);
      try
      {
@@ -4382,8 +4146,8 @@
        {
          environmentStats = tmpEnv.getEnvironmentStats(new StatsConfig());
        }
        long nCacheMiss = environmentStats.getNCacheMiss() -
                previousStats.getNCacheMiss();
        long nCacheMiss =
            environmentStats.getNCacheMiss() - previousStats.getNCacheMiss();
        float cacheMissRate = 0;
        if (deltaCount > 0)
@@ -4399,8 +4163,7 @@
        long evictBinsStrip = environmentStats.getNBINsStripped();
        long cleanerRuns = environmentStats.getNCleanerRuns();
        long cleanerDeletions = environmentStats.getNCleanerDeletions();
        long cleanerEntriesRead =
                environmentStats.getNCleanerEntriesRead();
        long cleanerEntriesRead = environmentStats.getNCleanerEntriesRead();
        long cleanerINCleaned = environmentStats.getNINsCleaned();
        long checkPoints = environmentStats.getNCheckpoints();
        if (evictPasses != 0)
@@ -4410,27 +4173,24 @@
            evicting = true;
            evictionEntryCount = reader.getEntriesRead();
            message =
                    NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED
                            .get(evictionEntryCount);
                NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED.get(evictionEntryCount);
            logError(message);
          }
          message =
                  NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS.get(
                          evictPasses, evictNodes, evictBinsStrip);
              NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS.get(evictPasses,
                  evictNodes, evictBinsStrip);
          logError(message);
        }
        if (cleanerRuns != 0)
        {
          message =
                  NOTE_JEB_IMPORT_LDIF_CLEANER_STATS.get(cleanerRuns,
                          cleanerDeletions, cleanerEntriesRead,
                          cleanerINCleaned);
                  cleanerDeletions, cleanerEntriesRead, cleanerINCleaned);
          logError(message);
        }
        if (checkPoints > 1)
        {
          message =
                  NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS.get(checkPoints);
          message = NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS.get(checkPoints);
          logError(message);
        }
        previousStats = environmentStats;
@@ -4444,8 +4204,6 @@
    }
  }
  /**
   * This class reports progress of the second phase of import processing at
   * fixed intervals.
@@ -4453,8 +4211,8 @@
  private class SecondPhaseProgressTask extends TimerTask
  {
    /**
     * The number of entries that had been read at the time of the
     * previous progress report.
     * The number of entries that had been read at the time of the previous
     * progress report.
     */
    private long previousCount = 0;
@@ -4473,8 +4231,6 @@
    private long latestCount;
    /**
     * Create a new import progress task.
     *
@@ -4495,7 +4251,6 @@
      }
    }
    /**
     * The action to be performed by this timer task.
     */
@@ -4516,8 +4271,8 @@
        long freeMemory = runTime.freeMemory() / MB;
        EnvironmentStats environmentStats =
                rootContainer.getEnvironmentStats(new StatsConfig());
        long nCacheMiss = environmentStats.getNCacheMiss() -
                          previousStats.getNCacheMiss();
        long nCacheMiss =
            environmentStats.getNCacheMiss() - previousStats.getNCacheMiss();
        float cacheMissRate = 0;
        if (deltaCount > 0)
@@ -4543,22 +4298,20 @@
            evicting = true;
          }
          message =
                  NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS.get(
                          evictPasses, evictNodes, evictBinsStrip);
              NOTE_JEB_IMPORT_LDIF_EVICTION_DETECTED_STATS.get(evictPasses,
                  evictNodes, evictBinsStrip);
          logError(message);
        }
        if (cleanerRuns != 0)
        {
          message =
                  NOTE_JEB_IMPORT_LDIF_CLEANER_STATS.get(cleanerRuns,
                          cleanerDeletions, cleanerEntriesRead,
                          cleanerINCleaned);
                  cleanerDeletions, cleanerEntriesRead, cleanerINCleaned);
          logError(message);
        }
        if (checkPoints > 1)
        {
          message =
                  NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS.get(checkPoints);
          message = NOTE_JEB_IMPORT_LDIF_BUFFER_CHECKPOINTS.get(checkPoints);
          logError(message);
        }
        previousStats = environmentStats;
@@ -4583,19 +4336,16 @@
    }
  }
  /**
   * A class to hold information about the entry determined by the LDIF reader.
   * Mainly the suffix the entry belongs under and the ID assigned to it by the
   * reader.
   *
   */
  public class EntryInformation
  {
    private EntryID entryID;
    private Suffix suffix;
    /**
     * Return the suffix associated with the entry.
     *
@@ -4609,7 +4359,8 @@
    /**
     * Set the suffix instance associated with the entry.
     *
     * @param suffix The suffix associated with the entry.
     * @param suffix
     *          The suffix associated with the entry.
     */
    public void setSuffix(Suffix suffix)
    {
@@ -4619,7 +4370,8 @@
    /**
     * Set the entry's ID.
     *
     * @param entryID The entry ID to set the entry ID to.
     * @param entryID
     *          The entry ID to set the entry ID to.
     */
    public void setEntryID(EntryID entryID)
    {
@@ -4639,9 +4391,9 @@
  /**
   * This class defines the individual index type available.
   *
   */
  public enum ImportIndexType {
  public enum ImportIndexType
  {
    /**
     * The DN index type.
     **/
@@ -4689,25 +4441,27 @@
  }
  /**
   * This class is used as an index key for hash maps that need to
   * process multiple suffix index elements into a single queue and/or maps
   * based on both attribute type and index type
   * (ie., cn.equality, sn.equality,...).
   * This class is used as an index key for hash maps that need to process
   * multiple suffix index elements into a single queue and/or maps based on
   * both attribute type and index type (ie., cn.equality, sn.equality,...).
   */
  public class IndexKey {
  public class IndexKey
  {
    private final AttributeType attributeType;
    private final ImportIndexType indexType;
    private final int entryLimit;
   /**
     * Create index key instance using the specified attribute type, index type
     * and index entry limit.
     *
     * @param attributeType The attribute type.
     * @param indexType The index type.
     * @param entryLimit The entry limit for the index.
     * @param attributeType
     *          The attribute type.
     * @param indexType
     *          The index type.
     * @param entryLimit
     *          The entry limit for the index.
     */
    IndexKey(AttributeType attributeType, ImportIndexType indexType,
             int entryLimit)
@@ -4719,20 +4473,21 @@
    /**
     * An equals method that uses both the attribute type and the index type.
     * Only returns {@code true} if the attribute type and index type are
     * equal.
     * Only returns {@code true} if the attribute type and index type are equal.
     *
     * @param obj the object to compare.
     * @param obj
     *          the object to compare.
     * @return {@code true} if the objects are equal, or {@code false} if they
     *         are not.
     */
    @Override
    public boolean equals(Object obj)
    {
      if (obj instanceof IndexKey) {
      if (obj instanceof IndexKey)
      {
        IndexKey oKey = (IndexKey) obj;
        if(attributeType.equals(oKey.getAttributeType()) &&
           indexType.equals(oKey.getIndexType()))
        if (attributeType.equals(oKey.getAttributeType())
            && indexType.equals(oKey.getIndexType()))
        {
          return true;
        }
@@ -4774,16 +4529,16 @@
    }
    /**
     * Return the index key name, which is the attribute type primary name,
     * a period, and the index type name. Used for building file names and
     * Return the index key name, which is the attribute type primary name, a
     * period, and the index type name. Used for building file names and
     * progress output.
     *
     * @return  The index key name.
     */
    public String getName()
    {
      return attributeType.getPrimaryName() + "." +
              StaticUtils.toLowerCase(indexType.name());
      return attributeType.getPrimaryName() + "."
          + StaticUtils.toLowerCase(indexType.name());
    }
    /**
@@ -4797,22 +4552,24 @@
    }
  }
  /**
   * The temporary enviroment will be shared when multiple suffixes are being
   * The temporary environment will be shared when multiple suffixes are being
   * processed. This interface is used by those suffix instance to do parental
   * checking of the DN cache.
   */
  public static interface DNCache {
  public static interface DNCache
  {
    /**
     * Returns {@code true} if the specified DN is contained in the DN cache,
     * or {@code false} otherwise.
     * Returns {@code true} if the specified DN is contained in the DN cache, or
     * {@code false} otherwise.
     *
     * @param dn  The DN to check the presence of.
     * @param dn
     *          The DN to check the presence of.
     * @return {@code true} if the cache contains the DN, or {@code false} if it
     *                      is not.
     * @throws DatabaseException If an error occurs reading the database.
     * @throws DatabaseException
     *           If an error occurs reading the database.
     */
    public boolean contains(DN dn) throws DatabaseException;
  }
@@ -4833,9 +4590,11 @@
     * Create a temporary DB environment and database to be used as a cache of
     * DNs when DN validation is performed in phase one processing.
     *
     * @param envPath The file path to create the enviroment under.
     * @throws DatabaseException If an error occurs either creating the
     *                           environment or the DN database.
     * @param envPath
     *          The file path to create the environment under.
     * @throws DatabaseException
     *           If an error occurs either creating the environment or the DN
     *           database.
     */
    public TmpEnv(File envPath) throws DatabaseException
    {
@@ -4848,8 +4607,8 @@
      envConfig.setConfigParam(EnvironmentConfig.ENV_RUN_CHECKPOINTER, "false");
      envConfig.setConfigParam(EnvironmentConfig.EVICTOR_LRU_ONLY, "false");
      envConfig.setConfigParam(EnvironmentConfig.EVICTOR_NODES_PER_SCAN, "128");
      envConfig.setConfigParam(EnvironmentConfig.MAX_MEMORY,
                                 Long.toString(tmpEnvCacheSize));
      envConfig.setConfigParam(EnvironmentConfig.MAX_MEMORY, Long
          .toString(tmpEnvCacheSize));
      DatabaseConfig dbConfig = new DatabaseConfig();
      dbConfig.setAllowCreate(true);
      dbConfig.setTransactional(false);
@@ -4866,7 +4625,8 @@
    private byte[] hashCode(byte[] b)
    {
      long hash = FNV_INIT;
      for (byte aB : b) {
      for (byte aB : b)
      {
        hash ^= aB;
        hash *= FNV_PRIME;
      }
@@ -4875,7 +4635,9 @@
    /**
     * Shutdown the temporary environment.
     * @throws JebException If error occurs.
     *
     * @throws JebException
     *           If error occurs.
     */
    public void shutdown() throws JebException
    {
@@ -4889,13 +4651,17 @@
     * the DN does not already exist in the cache and was inserted, or
     * {@code false} if the DN exists already in the cache.
     *
     * @param dn The DN to insert in the cache.
     * @param val A database entry to use in the insert.
     * @param key A database entry to use in the insert.
     * @param dn
     *          The DN to insert in the cache.
     * @param val
     *          A database entry to use in the insert.
     * @param key
     *          A database entry to use in the insert.
     * @return  {@code true} if the DN was inserted in the cache, or
     *          {@code false} if the DN exists in the cache already and could
     *           not be inserted.
     * @throws JebException If an error occurs accessing the database.
     *         {@code false} if the DN exists in the cache already and could not
     *         be inserted.
     * @throws JebException
     *           If an error occurs accessing the database.
     */
    public boolean insert(DN dn, DatabaseEntry val, DatabaseEntry key)
            throws JebException
@@ -4926,7 +4692,8 @@
          status = cursor.getSearchKey(key, dns, LockMode.RMW);
          if(status == OperationStatus.NOTFOUND)
          {
            Message message = Message.raw(Category.JEB, Severity.SEVERE_ERROR,
            Message message =
                Message.raw(Category.JEB, Severity.SEVERE_ERROR,
                    "Search DN cache failed.");
            throw new JebException(message);
          }
@@ -4948,8 +4715,8 @@
    }
    //Add the DN to the DNs as because of a hash collision.
    private void addDN(DatabaseEntry val, Cursor cursor,
                       byte[] dnBytes) throws JebException
    private void addDN(DatabaseEntry val, Cursor cursor, byte[] dnBytes)
        throws JebException
    {
      byte[] bytes = val.getData();
      int pLen = PackedInteger.getWriteIntLength(dnBytes.length);
@@ -4963,7 +4730,8 @@
      OperationStatus status = cursor.putCurrent(newVal);
      if(status != OperationStatus.SUCCESS)
      {
        Message message = Message.raw(Category.JEB, Severity.SEVERE_ERROR,
        Message message =
            Message.raw(Category.JEB, Severity.SEVERE_ERROR,
                "Add of DN to DN cache failed.");
        throw new JebException(message);
      }
@@ -4992,9 +4760,10 @@
    /**
     * Check if the specified DN is contained in the temporary DN cache.
     *
     * @param dn A DN check for.
     * @return  {@code true} if the specified DN is in the temporary DN cache,
     *          or {@code false} if it is not.
     * @param dn
     *          A DN check for.
     * @return {@code true} if the specified DN is in the temporary DN cache, or
     *         {@code false} if it is not.
     */
    @Override
    public boolean contains(DN dn)
@@ -5004,7 +4773,8 @@
      DatabaseEntry key = new DatabaseEntry();
      byte[] dnBytes = StaticUtils.getBytes(dn.toNormalizedString());
      key.setData(hashCode(dnBytes));
      try {
      try
      {
        cursor = dnCache.openCursor(null, CursorConfig.DEFAULT);
        DatabaseEntry dns = new DatabaseEntry();
        OperationStatus status =
@@ -5027,10 +4797,11 @@
    /**
     * Return temporary environment stats.
     *
     * @param statsConfig A stats configuration instance.
     *
     * @param statsConfig
     *          A stats configuration instance.
     * @return Environment stats.
     * @throws DatabaseException  If an error occurs retrieving the stats.
     * @throws DatabaseException
     *           If an error occurs retrieving the stats.
     */
    public EnvironmentStats getEnvironmentStats(StatsConfig statsConfig)
            throws DatabaseException
@@ -5043,7 +4814,8 @@
   * {@inheritDoc}
   */
  @Override
  public void diskLowThresholdReached(DiskSpaceMonitor monitor) {
  public void diskLowThresholdReached(DiskSpaceMonitor monitor)
  {
    diskFullThresholdReached(monitor);
  }
@@ -5051,20 +4823,21 @@
   * {@inheritDoc}
   */
  @Override
  public void diskFullThresholdReached(DiskSpaceMonitor monitor) {
  public void diskFullThresholdReached(DiskSpaceMonitor monitor)
  {
    isCanceled = true;
    Message msg;
    if(!isPhaseOneDone)
    {
      msg = ERR_IMPORT_LDIF_LACK_DISK_PHASE_ONE.get(
          monitor.getDirectory().getPath(), monitor.getFreeSpace(),
          monitor.getLowThreshold());
      msg =
          ERR_IMPORT_LDIF_LACK_DISK_PHASE_ONE.get(monitor.getDirectory()
              .getPath(), monitor.getFreeSpace(), monitor.getLowThreshold());
    }
    else
    {
      msg = ERR_IMPORT_LDIF_LACK_DISK_PHASE_TWO.get(
          monitor.getDirectory().getPath(), monitor.getFreeSpace(),
          monitor.getLowThreshold());
      msg =
          ERR_IMPORT_LDIF_LACK_DISK_PHASE_TWO.get(monitor.getDirectory()
              .getPath(), monitor.getFreeSpace(), monitor.getLowThreshold());
    }
    logError(msg);
  }
@@ -5073,7 +4846,8 @@
   * {@inheritDoc}
   */
  @Override
  public void diskSpaceRestored(DiskSpaceMonitor monitor) {
  public void diskSpaceRestored(DiskSpaceMonitor monitor)
  {
    // Do nothing.
  }
}
opends/src/server/org/opends/server/config/ConfigConstants.java
@@ -23,7 +23,7 @@
 *
 *
 *      Copyright 2006-2010 Sun Microsystems, Inc.
 *      Portions copyright 2011-2012 ForgeRock AS
 *      Portions copyright 2011-2013 ForgeRock AS
 */
package org.opends.server.config;
@@ -4332,6 +4332,13 @@
  public static final String ATTR_REBUILD_INDEX =
       NAME_PREFIX_TASK + "rebuild-index";
  /**
   * The name of the attribute in an rebuild task definition that specifies the
   * degraded index which needs to be clear.
   */
  public static final String ATTR_REBUILD_INDEX_CLEARDEGRADEDSTATE =
      ATTR_REBUILD_INDEX + "-clear-degraded-state";
  /**
   * The name of the attribute in an rebuild task definition that specifies the
opends/src/server/org/opends/server/tools/RebuildIndex.java
@@ -23,7 +23,7 @@
 *
 *
 *      Copyright 2006-2009 Sun Microsystems, Inc.
 *      Portions copyright 2012 ForgeRock AS.
 *      Portions Copyright 2011-2013 ForgeRock AS
 */
package org.opends.server.tools;
import org.opends.messages.Message;
@@ -41,6 +41,7 @@
import static org.opends.server.config.ConfigConstants.*;
import static org.opends.server.loggers.ErrorLogger.logError;
import org.opends.server.loggers.TextWriter;
import org.opends.server.loggers.ErrorLogger;
import org.opends.server.loggers.TextErrorLogPublisher;
@@ -85,6 +86,7 @@
  private StringArgument  tmpDirectory            = null;
  private BooleanArgument rebuildAll              = null;
  private BooleanArgument rebuildDegraded         = null;
  private BooleanArgument clearDegradedState      = null;
  /**
   * Processes the command-line arguments and invokes the rebuild process.
@@ -203,6 +205,10 @@
                    INFO_REBUILDINDEX_DESCRIPTION_REBUILD_DEGRADED.get());
      argParser.addArgument(rebuildDegraded);
      clearDegradedState =
          new BooleanArgument("clearDegradedState", null, "clearDegradedState",
                   INFO_REBUILDINDEX_DESCRIPTION_CLEAR_DEGRADED_STATE.get());
      argParser.addArgument(clearDegradedState);
      tmpDirectory =
           new StringArgument("tmpdirectory", null, "tmpdirectory", false,
@@ -557,6 +563,9 @@
    }
    else
    {
      if(clearDegradedState.isPresent()) {
        rebuildConfig.isClearDegradedState(true);
      }
      rebuildConfig.setRebuildMode(RebuildMode.USER_DEFINED);
    }
@@ -660,6 +669,15 @@
      attributes.add(
              new LDAPAttribute(ATTR_REBUILD_INDEX, values));
    }
    if (clearDegradedState.getValue() != null &&
        !clearDegradedState.getValue().equals(
            clearDegradedState.getDefaultValue())) {
      values = new ArrayList<ByteString>(1);
      values.add(ByteString.valueOf("true"));
      attributes.add(
            new LDAPAttribute(ATTR_REBUILD_INDEX_CLEARDEGRADEDSTATE, values));
    }
  }
  /**