From aafa726170b737638c4f5404aba49ac7c89d00a2 Mon Sep 17 00:00:00 2001
From: Jean-Noel Rouvignac <jean-noel.rouvignac@forgerock.com>
Date: Thu, 28 May 2015 15:48:20 +0000
Subject: [PATCH] OPENDJ-2016 Implement new on disk merge import strategy based on storage engine
---
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeStorageImporter.java | 24 ++++++++++--------------
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeStorageImporter.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeStorageImporter.java
index 626412d..af3cb55 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeStorageImporter.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/OnDiskMergeStorageImporter.java
@@ -406,7 +406,7 @@
* This will be persisted once {@link #maximumExpectedSizeOnDisk} reaches the
* {@link #bufferSize}.
* <p>
- * This code uses a {@link ConcurrentHashMap} instead of a {@link ConcurrentSkipListMap} because
+ * This code uses a {@link ConcurrentHashMap} instead of a {@code ConcurrentSkipListMap} because
* during performance testing it was found this code spent a lot of time in
* {@link ByteString#compareTo(ByteSequence)} when putting entries to the map. However, at this
* point, we only need to put very quickly data in the map, we do not need keys to be sorted.
@@ -1684,13 +1684,13 @@
{
for (int i = 0; i < threadCount - 1; i++)
{
- tasks.add(new ImportTask(tmpImporter, id2EntryPutTask));
+ tasks.add(new ImportTask(tmpImporter, backendStorage, id2EntryPutTask));
}
}
execService.invokeAll(tasks);
tasks.clear();
- execService.submit(new MigrateExcludedTask(backendStorage, tmpImporter, id2EntryPutTask)).get();
+ execService.submit(new MigrateExcludedTask(tmpImporter, backendStorage, id2EntryPutTask)).get();
id2EntryPutTask.finishedWrites();
dn2IdPutFuture.get();
}
@@ -1972,12 +1972,9 @@
/** Task used to migrate excluded branch. */
private final class MigrateExcludedTask extends ImportTask
{
- private final Storage storage;
-
- private MigrateExcludedTask(Storage storage, Importer importer, Id2EntryPutTask id2EntryPutTask)
+ private MigrateExcludedTask(Importer importer, Storage storage, Id2EntryPutTask id2EntryPutTask)
{
- super(importer, id2EntryPutTask);
- this.storage = storage;
+ super(importer, storage, id2EntryPutTask);
}
@Override
@@ -2050,12 +2047,9 @@
/** Task to migrate existing entries. */
private final class MigrateExistingEntriesTask extends ImportTask
{
- private final Storage storage;
-
private MigrateExistingEntriesTask(final Storage storage, Importer importer, Id2EntryPutTask id2EntryPutTask)
{
- super(importer, id2EntryPutTask);
- this.storage = storage;
+ super(importer, storage, id2EntryPutTask);
}
@Override
@@ -2149,11 +2143,13 @@
private class ImportTask implements Callable<Void>
{
private final Importer importer;
+ final Storage storage;
private final Id2EntryPutTask id2EntryPutTask;
- public ImportTask(final Importer importer, Id2EntryPutTask id2EntryPutTask)
+ public ImportTask(Importer importer, Storage storage, Id2EntryPutTask id2EntryPutTask)
{
this.importer = importer;
+ this.storage = storage;
this.id2EntryPutTask = id2EntryPutTask;
}
@@ -2226,7 +2222,7 @@
//Perform parent checking.
DN entryDN = entry.getName();
DN parentDN = suffix.getEntryContainer().getParentWithinBase(entryDN);
- DNCache dnCache = new Dn2IdDnCache(suffix, rootContainer.getStorage());
+ DNCache dnCache = new Dn2IdDnCache(suffix, storage);
if (parentDN != null && !suffix.isParentProcessed(parentDN, dnCache))
{
reader.rejectEntry(entry, ERR_IMPORT_PARENT_NOT_FOUND.get(parentDN));
--
Gitblit v1.10.0