From a069e2643d6be5b34309179220b2777d42df13fc Mon Sep 17 00:00:00 2001
From: Jean-Noël Rouvignac <jean-noel.rouvignac@forgerock.com>
Date: Fri, 02 Oct 2015 15:34:44 +0000
Subject: [PATCH] Final manual cleanup

---
 opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeImporter.java |   21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeImporter.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeImporter.java
index 83f4f5a..265e353 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeImporter.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeImporter.java
@@ -824,16 +824,6 @@
       visitIndexes(entryContainer, new TrustModifier(importer, true));
     }
 
-    final Set<EntryContainer> extractEntryContainers(Collection<TreeName> treeNames)
-    {
-      final Set<EntryContainer> containers = new HashSet<>();
-      for(TreeName treeName : treeNames)
-      {
-        containers.add(entryContainers.get(treeName.getBaseDN()));
-      }
-      return containers;
-    }
-
     final void clearEntryContainerTrees(EntryContainer entryContainer)
     {
       for(Tree tree : entryContainer.listTrees())
@@ -1198,8 +1188,8 @@
 
   /**
    * Chunk implementations are a data storage with an optional limited capacity. Chunk are typically used by first
-   * adding data to the storage using {@link put(ByteSequence, ByteSequence)} later on data can be sequentially accessed
-   * using {@link flip()}.
+   * adding data to the storage using {@link #put(ByteSequence, ByteSequence)}; later on, data can be sequentially
+   * accessed using {@link #flip()}.
    */
   interface Chunk
   {
@@ -1235,8 +1225,9 @@
    * Store and sort data into multiple chunks. Thanks to the chunk rolling mechanism, this chunk can sort and store an
    * unlimited amount of data. This class uses double-buffering: data are firstly stored in a
    * {@link InMemorySortedChunk} which, once full, will be asynchronously sorted and copied into a
-   * {@link FileRegionChunk}. Duplicate keys are reduced by a {@link Collector}. {@link #put(ByteSequence,
-   * ByteSequence))} is thread-safe. This class is used in phase-one. There is one {@link ExternalSortChunk} per
+   * {@link FileRegionChunk}. Duplicate keys are reduced by a {@link Collector}.
+   * {@link #put(ByteSequence, ByteSequence)} is thread-safe.
+   * This class is used in phase-one. There is one {@link ExternalSortChunk} per
    * database tree, shared across all phase-one importer threads, in charge of storing/sorting records.
    */
   static final class ExternalSortChunk implements Chunk
@@ -2264,6 +2255,7 @@
     @Override
     public void close()
     {
+      // nothing to do
     }
   }
 
@@ -2394,6 +2386,7 @@
         @Override
         public void close()
         {
+          // nothing to do
         }
 
         @Override

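Note on the Chunk javadoc corrected above: the contract is put-then-flip, i.e. records are
first added with put(ByteSequence, ByteSequence) and then read back sequentially via flip().
A minimal, hypothetical sketch of that usage pattern (SketchChunk and the String-based
signatures below are illustrative stand-ins, not the real OpenDJ API):

    import java.util.Iterator;
    import java.util.Map;
    import java.util.TreeMap;

    // Hypothetical stand-in mirroring the put-then-flip contract described in the javadoc.
    final class SketchChunk
    {
      private final TreeMap<String, String> records = new TreeMap<>();

      /** Adds a record; the real Chunk#put takes ByteSequence key/value pairs. */
      void put(String key, String value)
      {
        records.put(key, value);
      }

      /** Sequential, key-ordered access to what was put, as Chunk#flip provides. */
      Iterator<Map.Entry<String, String>> flip()
      {
        return records.entrySet().iterator();
      }
    }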
--
Gitblit v1.10.0