From 919ad22e29ed2229d4af6092f5e0ddecdeac779e Mon Sep 17 00:00:00 2001
From: Jean-Noel Rouvignac <jean-noel.rouvignac@forgerock.com>
Date: Thu, 12 Mar 2015 15:00:22 +0000
Subject: [PATCH] Get rid of too many ByteBuffer to ByteString conversions.
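
Replace java.nio.ByteBuffer with ByteSequence / ByteString /
ByteStringBuilder throughout the pluggable backend import code, so that
keys no longer have to be copied into a fresh ByteString on every DB
read or write. ImportIDSet now carries its key from construction, which
lets Index.insert() and Index.delete() drop their redundant key
parameter.

A minimal sketch of the reuse pattern (illustrative only: the class
name and example DN are hypothetical; the ByteStringBuilder calls are
the ones this patch relies on):

    import java.nio.charset.StandardCharsets;

    import org.forgerock.opendj.ldap.ByteString;
    import org.forgerock.opendj.ldap.ByteStringBuilder;

    public class KeyReuseSketch
    {
      public static void main(String[] args)
      {
        // One reusable builder replaces repeated ByteBuffer allocation
        // plus ByteString.wrap() copies.
        ByteStringBuilder key = new ByteStringBuilder(128);
        byte[] dn = "dc=example,dc=com".getBytes(StandardCharsets.UTF_8);
        key.clear().append(dn, 0, dn.length);

        // Convert to an immutable ByteString only when a snapshot is
        // really needed, e.g. as a TreeMap key.
        ByteString snapshot = key.toByteString();
        System.out.println(snapshot.length());
      }
    }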
---
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ImportIDSet.java | 32 ---
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java | 261 +++++++++++++------------------------
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java | 12 -
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexInputBuffer.java | 34 +---
opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexOutputBuffer.java | 23 +-
5 files changed, 127 insertions(+), 235 deletions(-)
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ImportIDSet.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ImportIDSet.java
index 8d51830..717a3cc 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ImportIDSet.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/ImportIDSet.java
@@ -26,8 +26,6 @@
*/
package org.opends.server.backends.pluggable;
-import java.nio.ByteBuffer;
-
import org.forgerock.opendj.ldap.ByteSequence;
import org.forgerock.opendj.ldap.ByteString;
import org.forgerock.opendj.ldap.ByteStringBuilder;
@@ -47,7 +45,7 @@
/** Boolean to keep track of whether the instance is defined. */
private boolean isDefined = true;
/** Key related to an ID set. */
- private ByteBuffer key;
+ private ByteSequence key;
/** The index entry limit size. */
private final int indexEntryLimit;
/**
@@ -68,12 +66,14 @@
* Create an import ID set of the specified size, index limit and index
* maintain count, plus an extra 128 slots.
*
+   * @param key The key associated with this ID set
* @param size The size of the underlying array, plus some extra space.
* @param limit The index entry limit.
* @param maintainCount whether to maintain the count when size is undefined.
*/
- public ImportIDSet(int size, int limit, boolean maintainCount)
+ public ImportIDSet(ByteSequence key, int size, int limit, boolean maintainCount)
{
+ this.key = key;
this.array = new long[size + 128];
// A limit of 0 means unlimited.
this.indexEntryLimit = limit == 0 ? Integer.MAX_VALUE : limit;
@@ -425,16 +425,6 @@
}
/**
- * Create a byte string representing this object's key that is suitable to write to a DB.
- *
- * @return A byte string representing this object's key
- */
- ByteString keyToByteString()
- {
- return ByteString.wrap(key.array(), 0, key.limit());
- }
-
- /**
* Create a byte string representing this object's value that is suitable to write to a DB.
*
* @return A byte string representing this object's value
@@ -459,21 +449,11 @@
}
/**
- * Set the DB key related to an import ID set.
- *
- * @param key Byte array containing the key.
- */
- public void setKey(ByteBuffer key)
- {
- this.key = key;
- }
-
- /**
* Return the DB key related to an import ID set.
*
- * @return The byte array containing the key.
+ * @return The byte string containing the key.
*/
- public ByteBuffer getKey()
+ public ByteSequence getKey()
{
return key;
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java
index b2dad1a..520cf1e 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Importer.java
@@ -47,7 +47,6 @@
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
-import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -1952,7 +1951,7 @@
@Override
public Void call() throws Exception
{
- ByteBuffer key = null;
+ ByteStringBuilder key = null;
ImportIDSet insertIDSet = null;
ImportIDSet deleteIDSet = null;
@@ -1979,30 +1978,13 @@
IndexInputBuffer b = bufferSet.pollFirst();
if (key == null)
{
+ key = new ByteStringBuilder(b.getKeyLen());
+
indexID = b.getIndexID();
-
- if (indexMgr.isDN2ID())
- {
- insertIDSet = new ImportIDSet(1, 1, false);
- deleteIDSet = new ImportIDSet(1, 1, false);
- }
- else
- {
- final Index index = idContainerMap.get(indexID);
- int limit = index.getIndexEntryLimit();
- boolean maintainCount = index.getMaintainCount();
- insertIDSet = new ImportIDSet(1, limit, maintainCount);
- deleteIDSet = new ImportIDSet(1, limit, maintainCount);
- }
-
- key = ByteBuffer.allocate(b.getKeyLen());
- key.flip();
b.fetchKey(key);
- b.mergeIDSet(insertIDSet);
- b.mergeIDSet(deleteIDSet);
- insertIDSet.setKey(key);
- deleteIDSet.setKey(key);
+ insertIDSet = newImportIDSet(key, indexID);
+ deleteIDSet = newImportIDSet(key, indexID);
}
else if (b.compare(key, indexID) != 0)
{
@@ -2010,39 +1992,14 @@
keyCount.incrementAndGet();
indexID = b.getIndexID();
-
- if (indexMgr.isDN2ID())
- {
- insertIDSet = new ImportIDSet(1, 1, false);
- deleteIDSet = new ImportIDSet(1, 1, false);
- }
- else
- {
- final Index index = idContainerMap.get(indexID);
- int limit = index.getIndexEntryLimit();
- boolean maintainCount = index.getMaintainCount();
- insertIDSet = new ImportIDSet(1, limit, maintainCount);
- deleteIDSet = new ImportIDSet(1, limit, maintainCount);
- }
-
- key.clear();
- if (b.getKeyLen() > key.capacity())
- {
- key = ByteBuffer.allocate(b.getKeyLen());
- }
- key.flip();
b.fetchKey(key);
- b.mergeIDSet(insertIDSet);
- b.mergeIDSet(deleteIDSet);
- insertIDSet.setKey(key);
- deleteIDSet.setKey(key);
+ insertIDSet = newImportIDSet(key, indexID);
+ deleteIDSet = newImportIDSet(key, indexID);
}
- else
- {
- b.mergeIDSet(insertIDSet);
- b.mergeIDSet(deleteIDSet);
- }
+
+ b.mergeIDSet(insertIDSet);
+ b.mergeIDSet(deleteIDSet);
if (b.hasMoreData())
{
@@ -2069,6 +2026,17 @@
}
}
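+  /** Creates an ImportIDSet for the given key: DN2ID uses a fixed entry limit of 1, other indexes use their own entry limit and maintain-count settings. */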
+ private ImportIDSet newImportIDSet(ByteStringBuilder key, Integer indexID)
+ {
+ if (indexMgr.isDN2ID())
+ {
+ return new ImportIDSet(key, 1, 1, false);
+ }
+
+ final Index index = idContainerMap.get(indexID);
+ return new ImportIDSet(key, 1, index.getIndexEntryLimit(), index.getMaintainCount());
+ }
+
private void addToDB(int indexID, ImportIDSet insertSet, ImportIDSet deleteSet) throws DirectoryException
{
if (indexMgr.isDN2ID())
@@ -2079,20 +2047,18 @@
{
if (deleteSet.size() > 0 || !deleteSet.isDefined())
{
- ByteString key = deleteSet.keyToByteString();
final Index index = idContainerMap.get(indexID);
- index.delete(txn, key, deleteSet);
+ index.delete(txn, deleteSet);
}
if (insertSet.size() > 0 || !insertSet.isDefined())
{
- ByteString key = insertSet.keyToByteString();
final Index index = idContainerMap.get(indexID);
- index.insert(txn, key, insertSet);
+ index.insert(txn, insertSet);
}
}
}
- private void addDN2ID(int indexID, ImportIDSet record) throws DirectoryException
+ private void addDN2ID(int indexID, ImportIDSet idSet) throws DirectoryException
{
DNState dnState;
if (!dnStateMap.containsKey(indexID))
@@ -2104,9 +2070,9 @@
{
dnState = dnStateMap.get(indexID);
}
- if (dnState.checkParent(txn, record))
+ if (dnState.checkParent(txn, idSet))
{
- dnState.writeToDB();
+ dnState.writeToDN2ID(idSet);
}
}
@@ -2119,75 +2085,49 @@
* This class is used by an index DB merge thread performing DN processing
* to keep track of the state of individual DN2ID index processing.
*/
- class DNState
+ final class DNState
{
private static final int DN_STATE_CACHE_SIZE = 64 * KB;
- private ByteBuffer parentDN, lastDN;
- private EntryID parentID, lastID, entryID;
- private ByteString dnKey, dnValue;
- private final TreeMap<ByteBuffer, EntryID> parentIDMap = new TreeMap<ByteBuffer, EntryID>();
private final EntryContainer entryContainer;
+ private final TreeName dn2id;
+ private final TreeMap<ByteString, EntryID> parentIDMap = new TreeMap<ByteString, EntryID>();
private final Map<ByteString, ImportIDSet> id2childTree = new TreeMap<ByteString, ImportIDSet>();
private final Map<ByteString, ImportIDSet> id2subtreeTree = new TreeMap<ByteString, ImportIDSet>();
private final int childLimit, subTreeLimit;
private final boolean childDoCount, subTreeDoCount;
+ private ByteSequence parentDN;
+ private final ByteStringBuilder lastDN = new ByteStringBuilder();
+ private EntryID parentID, lastID, entryID;
DNState(EntryContainer entryContainer)
{
this.entryContainer = entryContainer;
+ dn2id = entryContainer.getDN2ID().getName();
final Index id2c = entryContainer.getID2Children();
childLimit = id2c.getIndexEntryLimit();
childDoCount = id2c.getMaintainCount();
final Index id2s = entryContainer.getID2Subtree();
subTreeLimit = id2s.getIndexEntryLimit();
subTreeDoCount = id2s.getMaintainCount();
- lastDN = ByteBuffer.allocate(BYTE_BUFFER_CAPACITY);
}
- private ByteBuffer getParent(ByteBuffer buffer)
+ private ByteSequence getParent(ByteSequence dn)
{
- int parentIndex = JebFormat.findDNKeyParent(toByteString(buffer));
+ int parentIndex = JebFormat.findDNKeyParent(dn);
if (parentIndex < 0)
{
// This is the root or base DN
return null;
}
- ByteBuffer parent = buffer.duplicate();
- parent.limit(parentIndex);
- return parent;
- }
-
- private ByteString toByteString(ByteBuffer buffer)
- {
- return ByteString.wrap(buffer.array(), 0, buffer.limit());
- }
-
- private ByteBuffer deepCopy(ByteBuffer srcBuffer, ByteBuffer destBuffer)
- {
- if (destBuffer == null
- || destBuffer.clear().remaining() < srcBuffer.limit())
- {
- byte[] bytes = new byte[srcBuffer.limit()];
- System.arraycopy(srcBuffer.array(), 0, bytes, 0, srcBuffer.limit());
- return ByteBuffer.wrap(bytes);
- }
- else
- {
- destBuffer.put(srcBuffer);
- destBuffer.flip();
- return destBuffer;
- }
+ return dn.subSequence(0, parentIndex).toByteString();
}
/** Why do we still need this if we are checking parents in the first phase? */
- private boolean checkParent(ReadableStorage txn, ImportIDSet record) throws StorageRuntimeException
+ private boolean checkParent(ReadableStorage txn, ImportIDSet idSet) throws StorageRuntimeException
{
- dnKey = record.keyToByteString();
- dnValue = record.valueToByteString();
-
- entryID = new EntryID(dnValue);
- parentDN = getParent(record.getKey());
+ entryID = new EntryID(idSet.valueToByteString());
+ parentDN = getParent(idSet.getKey());
// Bypass the cache for append data; look up the parent in DN2ID and return.
if (importConfiguration != null
@@ -2196,8 +2136,7 @@
// If null is returned then this is a suffix DN.
if (parentDN != null)
{
- ByteString key = toByteString(parentDN);
- ByteString value = txn.read(entryContainer.getDN2ID().getName(), key);
+ ByteString value = txn.read(dn2id, parentDN);
if (value != null)
{
parentID = new EntryID(value);
@@ -2213,36 +2152,36 @@
}
else if (parentIDMap.isEmpty())
{
- parentIDMap.put(deepCopy(record.getKey(), null), entryID);
+ parentIDMap.put(idSet.getKey().toByteString(), entryID);
return true;
}
- else if (lastDN != null && lastDN.equals(parentDN))
+ else if (lastDN.equals(parentDN))
{
- parentIDMap.put(deepCopy(lastDN, null), lastID);
+ parentIDMap.put(lastDN.toByteString(), lastID);
parentID = lastID;
- lastDN = deepCopy(record.getKey(), lastDN);
+ lastDN.clear().append(idSet.getKey());
lastID = entryID;
return true;
}
else if (parentIDMap.lastKey().equals(parentDN))
{
parentID = parentIDMap.get(parentDN);
- lastDN = deepCopy(record.getKey(), lastDN);
+ lastDN.clear().append(idSet.getKey());
lastID = entryID;
return true;
}
else if (parentIDMap.containsKey(parentDN))
{
EntryID newParentID = parentIDMap.get(parentDN);
- ByteBuffer key = parentIDMap.lastKey();
+ ByteSequence key = parentIDMap.lastKey();
while (!parentDN.equals(key))
{
parentIDMap.remove(key);
key = parentIDMap.lastKey();
}
- parentIDMap.put(deepCopy(record.getKey(), null), entryID);
+ parentIDMap.put(idSet.getKey().toByteString(), entryID);
parentID = newParentID;
- lastDN = deepCopy(record.getKey(), lastDN);
+ lastDN.clear().append(idSet.getKey());
lastID = entryID;
}
else
@@ -2259,18 +2198,7 @@
{
if (parentID != null)
{
- ImportIDSet idSet;
- final ByteString parentIDBytes = parentID.toByteString();
- if (!id2childTree.containsKey(parentIDBytes))
- {
- idSet = new ImportIDSet(1, childLimit, childDoCount);
- id2childTree.put(parentIDBytes, idSet);
- }
- else
- {
- idSet = id2childTree.get(parentIDBytes);
- }
- idSet.addEntryID(childID);
+ getId2childtreeImportIDSet().addEntryID(childID);
if (id2childTree.size() > DN_STATE_CACHE_SIZE)
{
flushMapToDB(id2childTree, entryContainer.getID2Children(), true);
@@ -2283,15 +2211,26 @@
}
}
- private EntryID getParentID(ReadableStorage txn, ByteBuffer dn) throws StorageRuntimeException
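+  /** Returns the id2children ImportIDSet cached under the current parent ID, creating and caching it on first access. */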
+ private ImportIDSet getId2childtreeImportIDSet()
+ {
+ final ByteString parentIDBytes = parentID.toByteString();
+ ImportIDSet idSet = id2childTree.get(parentIDBytes);
+ if (idSet == null)
+ {
+ idSet = new ImportIDSet(parentIDBytes, 1, childLimit, childDoCount);
+ id2childTree.put(parentIDBytes, idSet);
+ }
+ return idSet;
+ }
+
+ private EntryID getParentID(ReadableStorage txn, ByteSequence dn) throws StorageRuntimeException
{
// Bypass the cache for append data; look up the parent DN in the DN2ID db
if (importConfiguration == null || !importConfiguration.appendToExistingData())
{
return parentIDMap.get(dn);
}
- ByteString key = toByteString(dn);
- ByteString value = txn.read(entryContainer.getDN2ID().getName(), key);
+ ByteString value = txn.read(dn2id, dn);
return value != null ? new EntryID(value) : null;
}
@@ -2299,42 +2238,19 @@
{
if (parentID != null)
{
- ImportIDSet idSet;
- final ByteString parentIDBytes = parentID.toByteString();
- if (!id2subtreeTree.containsKey(parentIDBytes))
- {
- idSet = new ImportIDSet(1, subTreeLimit, subTreeDoCount);
- id2subtreeTree.put(parentIDBytes, idSet);
- }
- else
- {
- idSet = id2subtreeTree.get(parentIDBytes);
- }
- idSet.addEntryID(childID);
+ getId2subtreeImportIDSet(parentID).addEntryID(childID);
// TODO:
// Instead of doing this,
// we could just walk the parent cache if available
- for (ByteBuffer dn = getParent(parentDN); dn != null; dn = getParent(dn))
+ for (ByteSequence dn = getParent(parentDN); dn != null; dn = getParent(dn))
{
EntryID nodeID = getParentID(txn, dn);
- if (nodeID == null)
+ if (nodeID != null)
{
- // We have a missing parent. Maybe parent checking was turned off?
- // Just ignore.
- break;
+ getId2subtreeImportIDSet(nodeID).addEntryID(childID);
}
-
- final ByteString nodeIDBytes = nodeID.toByteString();
- if (!id2subtreeTree.containsKey(nodeIDBytes))
- {
- idSet = new ImportIDSet(1, subTreeLimit, subTreeDoCount);
- id2subtreeTree.put(nodeIDBytes, idSet);
- }
- else
- {
- idSet = id2subtreeTree.get(nodeIDBytes);
- }
- idSet.addEntryID(childID);
+ // else we have a missing parent. Maybe parent checking was turned off?
+ // Just ignore.
}
if (id2subtreeTree.size() > DN_STATE_CACHE_SIZE)
{
@@ -2348,9 +2264,21 @@
}
}
- public void writeToDB() throws DirectoryException
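+  /** Returns the id2subtree ImportIDSet cached under the given entry ID, creating and caching it on first access. */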
+ private ImportIDSet getId2subtreeImportIDSet(EntryID entryID)
{
- txn.create(entryContainer.getDN2ID().getName(), dnKey, dnValue);
+ ByteString entryIDBytes = entryID.toByteString();
+ ImportIDSet idSet = id2subtreeTree.get(entryIDBytes);
+ if (idSet == null)
+ {
+ idSet = new ImportIDSet(entryIDBytes, 1, subTreeLimit, subTreeDoCount);
+ id2subtreeTree.put(entryIDBytes, idSet);
+ }
+ return idSet;
+ }
+
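+  /** Writes this ID set's key and the current entry ID to the DN2ID index. */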
+ public void writeToDN2ID(ImportIDSet idSet) throws DirectoryException
+ {
+ txn.create(dn2id, idSet.getKey(), entryID.toByteString());
indexMgr.addTotDNCount(1);
if (parentDN != null)
{
@@ -2359,26 +2287,23 @@
}
}
- private void flushMapToDB(Map<ByteString, ImportIDSet> map, Index index,
- boolean clearMap)
+ public void flush()
{
- for (Map.Entry<ByteString, ImportIDSet> e : map.entrySet())
+ flushMapToDB(id2childTree, entryContainer.getID2Children(), false);
+ flushMapToDB(id2subtreeTree, entryContainer.getID2Subtree(), false);
+ }
+
+ private void flushMapToDB(Map<ByteString, ImportIDSet> map, Index index, boolean clearMap)
+ {
+ for (ImportIDSet idSet : map.values())
{
- dnKey = e.getKey();
- ImportIDSet idSet = e.getValue();
- index.insert(txn, dnKey, idSet);
+ index.insert(txn, idSet);
}
if (clearMap)
{
map.clear();
}
}
-
- public void flush()
- {
- flushMapToDB(id2childTree, entryContainer.getID2Children(), false);
- flushMapToDB(id2subtreeTree, entryContainer.getID2Subtree(), false);
- }
}
}
@@ -4151,7 +4076,7 @@
return builder;
}
- /** Create a list of dn made of one element */
+ /** Create a list of DNs made of one element. */
private ByteSequence singletonList(final ByteSequence dntoAdd)
{
final ByteStringBuilder singleton = new ByteStringBuilder(dntoAdd.length() + INT_SIZE);
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java
index 2968293..5389ff8 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/Index.java
@@ -134,7 +134,7 @@
this.indexEntryLimit = indexEntryLimit;
this.cursorEntryLimit = cursorEntryLimit;
this.maintainCount = maintainCount;
- this.newImportIDSet = new ImportIDSet(indexEntryLimit, indexEntryLimit, maintainCount);
+ this.newImportIDSet = new ImportIDSet(null, indexEntryLimit, indexEntryLimit, maintainCount);
this.state = state;
this.trusted = state.getIndexTrustState(txn, this);
@@ -160,13 +160,12 @@
* Delete the specified import ID set from the import ID set associated with the key.
*
* @param txn The database transaction
- * @param key The key to delete the set from.
* @param importIdSet The import ID set to delete.
* @throws StorageRuntimeException If a database error occurs.
*/
- final void delete(WriteableStorage txn, ByteSequence key, ImportIDSet importIdSet)
- throws StorageRuntimeException
+ final void delete(WriteableStorage txn, ImportIDSet importIdSet) throws StorageRuntimeException
{
+ ByteSequence key = importIdSet.getKey();
ByteString value = txn.read(getName(), key);
if (value != null) {
newImportIDSet.clear();
@@ -190,13 +189,12 @@
* Insert the specified import ID set into this index. Creates a DB cursor if needed.
*
* @param txn The database transaction
- * @param key The key to add the set to.
* @param importIdSet The set of import IDs.
* @throws StorageRuntimeException If a database error occurs.
*/
- final void insert(WriteableStorage txn, ByteSequence key, ImportIDSet importIdSet)
- throws StorageRuntimeException
+ final void insert(WriteableStorage txn, ImportIDSet importIdSet) throws StorageRuntimeException
{
+ ByteSequence key = importIdSet.getKey();
ByteString value = txn.read(getName(), key);
if(value != null) {
newImportIDSet.clear();
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexInputBuffer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexInputBuffer.java
index 65033ff..542abc1 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexInputBuffer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexInputBuffer.java
@@ -34,6 +34,7 @@
import java.nio.channels.FileChannel;
import org.forgerock.i18n.slf4j.LocalizedLogger;
+import org.forgerock.opendj.ldap.ByteStringBuilder;
import org.opends.server.backends.pluggable.Importer.IndexManager;
/**
@@ -62,7 +63,7 @@
// Next fields are the fetched record data
private Integer indexID;
- private ByteBuffer keyBuf = ByteBuffer.allocate(128);
+ private final ByteStringBuilder keyBuffer = new ByteStringBuilder(128);
private RecordState recordState = RecordState.START;
/**
@@ -96,7 +97,6 @@
loadCache();
cache.flip();
- keyBuf.flip();
}
private void loadCache() throws IOException
@@ -142,19 +142,18 @@
*/
public int getKeyLen()
{
- return keyBuf.limit();
+ return keyBuffer.length();
}
/**
- * Fetches the next key into the provided byte buffer.
+ * Fetches the next key into the provided byte string builder.
*
* @param b
- * A buffer where to fetch the key
+ *          The builder into which to fetch the key
*/
- public void fetchKey(ByteBuffer b)
+ public void fetchKey(ByteStringBuilder b)
{
- keyBuf.get(b.array(), 0, keyBuf.limit());
- b.limit(keyBuf.limit());
+ b.clear().append(keyBuffer);
}
/**
@@ -208,14 +207,8 @@
ensureData(20);
int keyLen = getInt();
- if (keyLen > keyBuf.capacity())
- {
- keyBuf = ByteBuffer.allocate(keyLen);
- }
ensureData(keyLen);
- keyBuf.clear();
- cache.get(keyBuf.array(), 0, keyLen);
- keyBuf.limit(keyLen);
+ keyBuffer.clear().append(cache, keyLen);
recordState = RecordState.NEED_INSERT_ID_SET;
}
@@ -304,11 +297,10 @@
* index ID, a positive number if this buffer is greater, or zero if
* it is the same.
*/
- int compare(ByteBuffer cKey, Integer cIndexID)
+ int compare(ByteStringBuilder cKey, Integer cIndexID)
{
ensureRecordFetched();
- int cmp = Importer.indexComparator.compare(keyBuf.array(), 0, keyBuf.limit(), cKey.array(), cKey.limit());
- if (cmp == 0)
+ if (Importer.indexComparator.compare(keyBuffer, cKey) == 0)
{
return (indexID.intValue() == cIndexID.intValue()) ? 0 : 1;
}
@@ -328,9 +320,7 @@
ensureRecordFetched();
o.ensureRecordFetched();
- byte[] oKey = o.keyBuf.array();
- int oLen = o.keyBuf.limit();
- int cmp = Importer.indexComparator.compare(keyBuf.array(), 0, keyBuf.limit(), oKey, oLen);
+ int cmp = Importer.indexComparator.compare(keyBuffer, o.keyBuffer);
if (cmp == 0)
{
cmp = indexID.intValue() - o.getIndexID().intValue();
@@ -344,7 +334,7 @@
private void ensureRecordFetched()
{
- if (keyBuf.limit() == 0)
+ if (keyBuffer.length() == 0)
{
getIndexID();
}
diff --git a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexOutputBuffer.java b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexOutputBuffer.java
index 6e2faa2..365dcf1 100644
--- a/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexOutputBuffer.java
+++ b/opendj-server-legacy/src/main/java/org/opends/server/backends/pluggable/IndexOutputBuffer.java
@@ -31,9 +31,9 @@
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
-import java.nio.ByteBuffer;
import org.forgerock.opendj.ldap.ByteSequence;
+import org.forgerock.opendj.ldap.ByteStringBuilder;
/**
* This class represents an index buffer used to store the keys and entry IDs
@@ -114,7 +114,7 @@
* This buffer is reused during key compares. Its main purpose is to keep
* the memory footprint as small as possible.
*/
- private ByteBuffer keyBuffer = ByteBuffer.allocate(CAP);
+ private ByteStringBuilder keyBuffer = new ByteStringBuilder(CAP);
/**
* Set to {@code true} if the buffer should not be recycled. Used when the
@@ -361,19 +361,13 @@
}
/** Used to minimize memory usage when comparing keys. */
- private ByteBuffer getKeyBuf(int position)
+ private ByteStringBuilder getKeyBuf(int position)
{
keyBuffer.clear();
int offSet = getOffset(position) + REC_OVERHEAD + LONG_SIZE;
int keyLen = readInt(buffer, offSet);
offSet += INT_SIZE;
- //Re-allocate if the key is bigger than the capacity.
- if(keyLen > keyBuffer.capacity())
- {
- keyBuffer = ByteBuffer.allocate(keyLen);
- }
- keyBuffer.put(buffer, offSet, keyLen);
- keyBuffer.flip();
+ keyBuffer.append(buffer, offSet, keyLen);
return keyBuffer;
}
@@ -479,14 +473,14 @@
@Override
public int compareTo(IndexOutputBuffer b)
{
- final ByteBuffer keyBuf = b.getKeyBuf(b.position);
+ final ByteStringBuilder keyBuf = b.getKeyBuf(b.position);
int offset = getOffset(position);
int indexID = getIndexIDFromOffset(offset);
offset += REC_OVERHEAD + LONG_SIZE;
int keyLen = readInt(buffer, offset);
int key = INT_SIZE + offset;
- int cmp = indexComparator.compare(buffer, key, keyLen, keyBuf.array(), keyBuf.limit());
+ int cmp = indexComparator.compare(buffer, key, keyLen, keyBuf.getBackingArray(), keyBuf.length());
if (cmp == 0)
{
cmp = compareInts(indexID, b.getIndexID());
@@ -751,6 +745,11 @@
return cmp;
}
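+  /** Compares two builder-backed keys without copying their contents. */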
+ int compare(ByteStringBuilder key1, ByteStringBuilder key2)
+ {
+ return compare(key1.getBackingArray(), 0, key1.length(), key2.getBackingArray(), key2.length());
+ }
+
/**
* Compare an offset in a byte array with the specified byte array,
* using the DN compare algorithm.
--
Gitblit v1.10.0